Diffstat (limited to 'vendor')
-rw-r--r-- vendor/github.com/apparentlymart/go-cidr/LICENSE | 19
-rw-r--r-- vendor/github.com/apparentlymart/go-cidr/cidr/cidr.go | 112
-rw-r--r-- vendor/github.com/apparentlymart/go-cidr/cidr/wrangling.go | 38
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/LICENSE.txt | 202
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/NOTICE.txt | 3
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go | 145
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go | 194
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go | 108
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go | 27
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go | 222
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go | 113
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go | 89
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/aws/client/client.go | 149
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go | 96
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go | 12
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/aws/config.go | 470
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/aws/context.go | 71
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/aws/context_1_6.go | 41
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/aws/context_1_7.go | 9
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/aws/convert_types.go | 369
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go | 226
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go | 17
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go | 102
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go | 246
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go | 178
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go | 191
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go | 78
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini | 12
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go | 151
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go | 57
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go | 298
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go | 163
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/aws/doc.go | 56
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go | 162
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go | 124
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go | 133
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go | 2174
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go | 66
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go | 439
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go | 303
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go | 337
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/aws/errors.go | 17
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go | 12
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/aws/logger.go | 112
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go | 19
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error_appengine.go | 11
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go | 225
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go | 24
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go | 58
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/aws/request/request.go | 575
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go | 21
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go | 9
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go | 14
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go | 14
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go | 236
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go | 154
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go | 94
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/aws/request/validation.go | 234
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go | 287
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/aws/session/doc.go | 273
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go | 208
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/aws/session/session.go | 590
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go | 295
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go | 82
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go | 7
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go | 24
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go | 761
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/aws/types.go | 118
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/aws/url.go | 12
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go | 29
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/aws/version.go | 8
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go | 75
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go | 36
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go | 237
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go | 35
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go | 66
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go | 290
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go | 45
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go | 227
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go | 69
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go | 21
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go | 296
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go | 260
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go | 147
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/service/s3/api.go | 19245
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go | 106
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/service/s3/content_md5.go | 36
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go | 46
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/service/s3/doc.go | 78
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go | 109
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/service/s3/errors.go | 48
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go | 162
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers.go | 8
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers_go1.6.go | 28
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/service/s3/service.go | 93
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/service/s3/sse.go | 44
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go | 35
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go | 103
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go | 214
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/service/sts/api.go | 2365
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go | 12
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/service/sts/doc.go | 124
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/service/sts/errors.go | 73
-rw-r--r-- vendor/github.com/aws/aws-sdk-go/service/sts/service.go | 93
-rw-r--r-- vendor/github.com/bgentry/go-netrc/LICENSE | 20
-rw-r--r-- vendor/github.com/bgentry/go-netrc/netrc/netrc.go | 510
-rw-r--r-- vendor/github.com/davecgh/go-spew/LICENSE | 15
-rw-r--r-- vendor/github.com/davecgh/go-spew/spew/bypass.go | 152
-rw-r--r-- vendor/github.com/davecgh/go-spew/spew/bypasssafe.go | 38
-rw-r--r-- vendor/github.com/davecgh/go-spew/spew/common.go | 341
-rw-r--r-- vendor/github.com/davecgh/go-spew/spew/config.go | 306
-rw-r--r-- vendor/github.com/davecgh/go-spew/spew/doc.go | 211
-rw-r--r-- vendor/github.com/davecgh/go-spew/spew/dump.go | 509
-rw-r--r-- vendor/github.com/davecgh/go-spew/spew/format.go | 419
-rw-r--r-- vendor/github.com/davecgh/go-spew/spew/spew.go | 148
-rw-r--r-- vendor/github.com/go-ini/ini/LICENSE | 191
-rw-r--r-- vendor/github.com/go-ini/ini/Makefile | 12
-rw-r--r-- vendor/github.com/go-ini/ini/README.md | 734
-rw-r--r-- vendor/github.com/go-ini/ini/README_ZH.md | 721
-rw-r--r-- vendor/github.com/go-ini/ini/error.go | 32
-rw-r--r-- vendor/github.com/go-ini/ini/ini.go | 535
-rw-r--r-- vendor/github.com/go-ini/ini/key.go | 633
-rw-r--r-- vendor/github.com/go-ini/ini/parser.go | 356
-rw-r--r-- vendor/github.com/go-ini/ini/section.go | 221
-rw-r--r-- vendor/github.com/go-ini/ini/struct.go | 431
-rw-r--r-- vendor/github.com/hashicorp/errwrap/LICENSE | 354
-rw-r--r-- vendor/github.com/hashicorp/errwrap/README.md | 89
-rw-r--r-- vendor/github.com/hashicorp/errwrap/errwrap.go | 169
-rw-r--r-- vendor/github.com/hashicorp/go-getter/LICENSE | 354
-rw-r--r-- vendor/github.com/hashicorp/go-getter/README.md | 253
-rw-r--r-- vendor/github.com/hashicorp/go-getter/appveyor.yml | 16
-rw-r--r-- vendor/github.com/hashicorp/go-getter/client.go | 335
-rw-r--r-- vendor/github.com/hashicorp/go-getter/client_mode.go | 24
-rw-r--r-- vendor/github.com/hashicorp/go-getter/copy_dir.go | 78
-rw-r--r-- vendor/github.com/hashicorp/go-getter/decompress.go | 29
-rw-r--r-- vendor/github.com/hashicorp/go-getter/decompress_bzip2.go | 45
-rw-r--r-- vendor/github.com/hashicorp/go-getter/decompress_gzip.go | 49
-rw-r--r-- vendor/github.com/hashicorp/go-getter/decompress_tbz2.go | 95
-rw-r--r-- vendor/github.com/hashicorp/go-getter/decompress_testing.go | 134
-rw-r--r-- vendor/github.com/hashicorp/go-getter/decompress_tgz.go | 99
-rw-r--r-- vendor/github.com/hashicorp/go-getter/decompress_zip.go | 96
-rw-r--r-- vendor/github.com/hashicorp/go-getter/detect.go | 97
-rw-r--r-- vendor/github.com/hashicorp/go-getter/detect_bitbucket.go | 66
-rw-r--r-- vendor/github.com/hashicorp/go-getter/detect_file.go | 67
-rw-r--r-- vendor/github.com/hashicorp/go-getter/detect_github.go | 73
-rw-r--r-- vendor/github.com/hashicorp/go-getter/detect_s3.go | 61
-rw-r--r-- vendor/github.com/hashicorp/go-getter/folder_storage.go | 65
-rw-r--r-- vendor/github.com/hashicorp/go-getter/get.go | 139
-rw-r--r-- vendor/github.com/hashicorp/go-getter/get_file.go | 32
-rw-r--r-- vendor/github.com/hashicorp/go-getter/get_file_unix.go | 103
-rw-r--r-- vendor/github.com/hashicorp/go-getter/get_file_windows.go | 120
-rw-r--r-- vendor/github.com/hashicorp/go-getter/get_git.go | 225
-rw-r--r-- vendor/github.com/hashicorp/go-getter/get_hg.go | 131
-rw-r--r-- vendor/github.com/hashicorp/go-getter/get_http.go | 219
-rw-r--r-- vendor/github.com/hashicorp/go-getter/get_mock.go | 52
-rw-r--r-- vendor/github.com/hashicorp/go-getter/get_s3.go | 243
-rw-r--r-- vendor/github.com/hashicorp/go-getter/helper/url/url.go | 14
-rw-r--r-- vendor/github.com/hashicorp/go-getter/helper/url/url_unix.go | 11
-rw-r--r-- vendor/github.com/hashicorp/go-getter/helper/url/url_windows.go | 40
-rw-r--r-- vendor/github.com/hashicorp/go-getter/netrc.go | 67
-rw-r--r-- vendor/github.com/hashicorp/go-getter/source.go | 36
-rw-r--r-- vendor/github.com/hashicorp/go-getter/storage.go | 13
-rw-r--r-- vendor/github.com/hashicorp/go-multierror/LICENSE | 353
-rw-r--r-- vendor/github.com/hashicorp/go-multierror/README.md | 91
-rw-r--r-- vendor/github.com/hashicorp/go-multierror/append.go | 37
-rw-r--r-- vendor/github.com/hashicorp/go-multierror/flatten.go | 26
-rw-r--r-- vendor/github.com/hashicorp/go-multierror/format.go | 23
-rw-r--r-- vendor/github.com/hashicorp/go-multierror/multierror.go | 51
-rw-r--r-- vendor/github.com/hashicorp/go-multierror/prefix.go | 37
-rw-r--r-- vendor/github.com/hashicorp/go-plugin/LICENSE | 353
-rw-r--r-- vendor/github.com/hashicorp/go-plugin/README.md | 161
-rw-r--r-- vendor/github.com/hashicorp/go-plugin/client.go | 581
-rw-r--r-- vendor/github.com/hashicorp/go-plugin/discover.go | 28
-rw-r--r-- vendor/github.com/hashicorp/go-plugin/error.go | 24
-rw-r--r-- vendor/github.com/hashicorp/go-plugin/mux_broker.go | 204
-rw-r--r-- vendor/github.com/hashicorp/go-plugin/plugin.go | 25
-rw-r--r-- vendor/github.com/hashicorp/go-plugin/process.go | 24
-rw-r--r-- vendor/github.com/hashicorp/go-plugin/process_posix.go | 19
-rw-r--r-- vendor/github.com/hashicorp/go-plugin/process_windows.go | 29
-rw-r--r-- vendor/github.com/hashicorp/go-plugin/rpc_client.go | 123
-rw-r--r-- vendor/github.com/hashicorp/go-plugin/rpc_server.go | 185
-rw-r--r-- vendor/github.com/hashicorp/go-plugin/server.go | 222
-rw-r--r-- vendor/github.com/hashicorp/go-plugin/server_mux.go | 31
-rw-r--r-- vendor/github.com/hashicorp/go-plugin/stream.go | 18
-rw-r--r-- vendor/github.com/hashicorp/go-plugin/testing.go | 76
-rw-r--r-- vendor/github.com/hashicorp/go-uuid/LICENSE | 363
-rw-r--r-- vendor/github.com/hashicorp/go-uuid/README.md | 8
-rw-r--r-- vendor/github.com/hashicorp/go-uuid/uuid.go | 57
-rw-r--r-- vendor/github.com/hashicorp/go-version/LICENSE | 354
-rw-r--r-- vendor/github.com/hashicorp/go-version/README.md | 65
-rw-r--r-- vendor/github.com/hashicorp/go-version/constraint.go | 178
-rw-r--r-- vendor/github.com/hashicorp/go-version/version.go | 308
-rw-r--r-- vendor/github.com/hashicorp/go-version/version_collection.go | 17
-rw-r--r-- vendor/github.com/hashicorp/hcl/LICENSE | 354
-rw-r--r-- vendor/github.com/hashicorp/hcl/Makefile | 18
-rw-r--r-- vendor/github.com/hashicorp/hcl/README.md | 125
-rw-r--r-- vendor/github.com/hashicorp/hcl/appveyor.yml | 19
-rw-r--r-- vendor/github.com/hashicorp/hcl/decoder.go | 724
-rw-r--r-- vendor/github.com/hashicorp/hcl/hcl.go | 11
-rw-r--r-- vendor/github.com/hashicorp/hcl/hcl/ast/ast.go | 219
-rw-r--r-- vendor/github.com/hashicorp/hcl/hcl/ast/walk.go | 52
-rw-r--r-- vendor/github.com/hashicorp/hcl/hcl/parser/error.go | 17
-rw-r--r-- vendor/github.com/hashicorp/hcl/hcl/parser/parser.go | 520
-rw-r--r-- vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go | 651
-rw-r--r-- vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go | 241
-rw-r--r-- vendor/github.com/hashicorp/hcl/hcl/token/position.go | 46
-rw-r--r-- vendor/github.com/hashicorp/hcl/hcl/token/token.go | 219
-rw-r--r-- vendor/github.com/hashicorp/hcl/json/parser/flatten.go | 117
-rw-r--r-- vendor/github.com/hashicorp/hcl/json/parser/parser.go | 313
-rw-r--r-- vendor/github.com/hashicorp/hcl/json/scanner/scanner.go | 451
-rw-r--r-- vendor/github.com/hashicorp/hcl/json/token/position.go | 46
-rw-r--r-- vendor/github.com/hashicorp/hcl/json/token/token.go | 118
-rw-r--r-- vendor/github.com/hashicorp/hcl/lex.go | 38
-rw-r--r-- vendor/github.com/hashicorp/hcl/parse.go | 39
-rw-r--r-- vendor/github.com/hashicorp/hil/LICENSE | 353
-rw-r--r-- vendor/github.com/hashicorp/hil/README.md | 102
-rw-r--r-- vendor/github.com/hashicorp/hil/appveyor.yml | 18
-rw-r--r-- vendor/github.com/hashicorp/hil/ast/arithmetic.go | 43
-rw-r--r-- vendor/github.com/hashicorp/hil/ast/arithmetic_op.go | 24
-rw-r--r-- vendor/github.com/hashicorp/hil/ast/ast.go | 99
-rw-r--r-- vendor/github.com/hashicorp/hil/ast/call.go | 47
-rw-r--r-- vendor/github.com/hashicorp/hil/ast/conditional.go | 36
-rw-r--r-- vendor/github.com/hashicorp/hil/ast/index.go | 76
-rw-r--r-- vendor/github.com/hashicorp/hil/ast/literal.go | 88
-rw-r--r-- vendor/github.com/hashicorp/hil/ast/output.go | 78
-rw-r--r-- vendor/github.com/hashicorp/hil/ast/scope.go | 90
-rw-r--r-- vendor/github.com/hashicorp/hil/ast/stack.go | 25
-rw-r--r-- vendor/github.com/hashicorp/hil/ast/type_string.go | 54
-rw-r--r-- vendor/github.com/hashicorp/hil/ast/unknown.go | 30
-rw-r--r-- vendor/github.com/hashicorp/hil/ast/variable_access.go | 36
-rw-r--r-- vendor/github.com/hashicorp/hil/ast/variables_helper.go | 63
-rw-r--r-- vendor/github.com/hashicorp/hil/builtins.go | 331
-rw-r--r-- vendor/github.com/hashicorp/hil/check_identifier.go | 88
-rw-r--r-- vendor/github.com/hashicorp/hil/check_types.go | 668
-rw-r--r-- vendor/github.com/hashicorp/hil/convert.go | 159
-rw-r--r-- vendor/github.com/hashicorp/hil/eval.go | 472
-rw-r--r-- vendor/github.com/hashicorp/hil/eval_type.go | 16
-rw-r--r-- vendor/github.com/hashicorp/hil/evaltype_string.go | 42
-rw-r--r-- vendor/github.com/hashicorp/hil/parse.go | 29
-rw-r--r-- vendor/github.com/hashicorp/hil/parser/binary_op.go | 45
-rw-r--r-- vendor/github.com/hashicorp/hil/parser/error.go | 38
-rw-r--r-- vendor/github.com/hashicorp/hil/parser/fuzz.go | 28
-rw-r--r-- vendor/github.com/hashicorp/hil/parser/parser.go | 522
-rw-r--r-- vendor/github.com/hashicorp/hil/scanner/peeker.go | 55
-rw-r--r-- vendor/github.com/hashicorp/hil/scanner/scanner.go | 550
-rw-r--r-- vendor/github.com/hashicorp/hil/scanner/token.go | 105
-rw-r--r-- vendor/github.com/hashicorp/hil/scanner/tokentype_string.go | 51
-rw-r--r-- vendor/github.com/hashicorp/hil/transform_fixed.go | 29
-rw-r--r-- vendor/github.com/hashicorp/hil/walk.go | 266
-rw-r--r-- vendor/github.com/hashicorp/logutils/LICENSE | 354
-rw-r--r-- vendor/github.com/hashicorp/logutils/README.md | 36
-rw-r--r-- vendor/github.com/hashicorp/logutils/level.go | 81
-rw-r--r-- vendor/github.com/hashicorp/terraform/LICENSE | 354
-rw-r--r-- vendor/github.com/hashicorp/terraform/config/append.go | 86
-rw-r--r-- vendor/github.com/hashicorp/terraform/config/config.go | 1096
-rw-r--r-- vendor/github.com/hashicorp/terraform/config/config_string.go | 338
-rw-r--r-- vendor/github.com/hashicorp/terraform/config/config_terraform.go | 117
-rw-r--r-- vendor/github.com/hashicorp/terraform/config/config_tree.go | 43
-rw-r--r-- vendor/github.com/hashicorp/terraform/config/import_tree.go | 113
-rw-r--r-- vendor/github.com/hashicorp/terraform/config/interpolate.go | 386
-rw-r--r-- vendor/github.com/hashicorp/terraform/config/interpolate_funcs.go | 1390
-rw-r--r-- vendor/github.com/hashicorp/terraform/config/interpolate_walk.go | 283
-rw-r--r-- vendor/github.com/hashicorp/terraform/config/lang.go | 11
-rw-r--r-- vendor/github.com/hashicorp/terraform/config/loader.go | 224
-rw-r--r-- vendor/github.com/hashicorp/terraform/config/loader_hcl.go | 1130
-rw-r--r-- vendor/github.com/hashicorp/terraform/config/merge.go | 193
-rw-r--r-- vendor/github.com/hashicorp/terraform/config/module/copy_dir.go | 114
-rw-r--r-- vendor/github.com/hashicorp/terraform/config/module/get.go | 71
-rw-r--r-- vendor/github.com/hashicorp/terraform/config/module/inode.go | 21
-rw-r--r-- vendor/github.com/hashicorp/terraform/config/module/inode_freebsd.go | 21
-rw-r--r-- vendor/github.com/hashicorp/terraform/config/module/inode_windows.go | 8
-rw-r--r-- vendor/github.com/hashicorp/terraform/config/module/module.go | 7
-rw-r--r-- vendor/github.com/hashicorp/terraform/config/module/testing.go | 38
-rw-r--r-- vendor/github.com/hashicorp/terraform/config/module/tree.go | 428
-rw-r--r-- vendor/github.com/hashicorp/terraform/config/module/tree_gob.go | 57
-rw-r--r-- vendor/github.com/hashicorp/terraform/config/module/validate_provider_alias.go | 118
-rw-r--r-- vendor/github.com/hashicorp/terraform/config/provisioner_enums.go | 40
-rw-r--r-- vendor/github.com/hashicorp/terraform/config/raw_config.go | 335
-rw-r--r-- vendor/github.com/hashicorp/terraform/config/resource_mode.go | 9
-rw-r--r-- vendor/github.com/hashicorp/terraform/config/resource_mode_string.go | 16
-rw-r--r-- vendor/github.com/hashicorp/terraform/config/testing.go | 15
-rw-r--r-- vendor/github.com/hashicorp/terraform/dag/dag.go | 286
-rw-r--r-- vendor/github.com/hashicorp/terraform/dag/dot.go | 282
-rw-r--r-- vendor/github.com/hashicorp/terraform/dag/edge.go | 37
-rw-r--r-- vendor/github.com/hashicorp/terraform/dag/graph.go | 391
-rw-r--r-- vendor/github.com/hashicorp/terraform/dag/marshal.go | 462
-rw-r--r-- vendor/github.com/hashicorp/terraform/dag/set.go | 109
-rw-r--r-- vendor/github.com/hashicorp/terraform/dag/tarjan.go | 107
-rw-r--r-- vendor/github.com/hashicorp/terraform/dag/walk.go | 445
-rw-r--r-- vendor/github.com/hashicorp/terraform/flatmap/expand.go | 147
-rw-r--r-- vendor/github.com/hashicorp/terraform/flatmap/flatten.go | 71
-rw-r--r-- vendor/github.com/hashicorp/terraform/flatmap/map.go | 82
-rw-r--r-- vendor/github.com/hashicorp/terraform/helper/acctest/acctest.go | 2
-rw-r--r-- vendor/github.com/hashicorp/terraform/helper/acctest/random.go | 93
-rw-r--r-- vendor/github.com/hashicorp/terraform/helper/acctest/remotetests.go | 27
-rw-r--r-- vendor/github.com/hashicorp/terraform/helper/config/decode.go | 28
-rw-r--r-- vendor/github.com/hashicorp/terraform/helper/config/validator.go | 214
-rw-r--r-- vendor/github.com/hashicorp/terraform/helper/experiment/experiment.go | 154
-rw-r--r-- vendor/github.com/hashicorp/terraform/helper/experiment/id.go | 34
-rw-r--r-- vendor/github.com/hashicorp/terraform/helper/hashcode/hashcode.go | 22
-rw-r--r-- vendor/github.com/hashicorp/terraform/helper/hilmapstructure/hilmapstructure.go | 41
-rw-r--r-- vendor/github.com/hashicorp/terraform/helper/logging/logging.go | 100
-rw-r--r-- vendor/github.com/hashicorp/terraform/helper/logging/transport.go | 53
-rw-r--r-- vendor/github.com/hashicorp/terraform/helper/resource/error.go | 79
-rw-r--r-- vendor/github.com/hashicorp/terraform/helper/resource/id.go | 39
-rw-r--r-- vendor/github.com/hashicorp/terraform/helper/resource/map.go | 140
-rw-r--r-- vendor/github.com/hashicorp/terraform/helper/resource/resource.go | 49
-rw-r--r-- vendor/github.com/hashicorp/terraform/helper/resource/state.go | 259
-rw-r--r-- vendor/github.com/hashicorp/terraform/helper/resource/testing.go | 790
-rw-r--r-- vendor/github.com/hashicorp/terraform/helper/resource/testing_config.go | 160
-rw-r--r-- vendor/github.com/hashicorp/terraform/helper/resource/testing_import_state.go | 141
-rw-r--r-- vendor/github.com/hashicorp/terraform/helper/resource/wait.go | 84
-rw-r--r-- vendor/github.com/hashicorp/terraform/helper/schema/README.md | 11
-rw-r--r-- vendor/github.com/hashicorp/terraform/helper/schema/backend.go | 94
-rw-r--r-- vendor/github.com/hashicorp/terraform/helper/schema/data_source_resource_shim.go | 59
-rw-r--r-- vendor/github.com/hashicorp/terraform/helper/schema/equal.go | 6
-rw-r--r-- vendor/github.com/hashicorp/terraform/helper/schema/field_reader.go | 334
-rw-r--r-- vendor/github.com/hashicorp/terraform/helper/schema/field_reader_config.go | 333
-rw-r--r-- vendor/github.com/hashicorp/terraform/helper/schema/field_reader_diff.go | 208
-rw-r--r-- vendor/github.com/hashicorp/terraform/helper/schema/field_reader_map.go | 232
-rw-r--r-- vendor/github.com/hashicorp/terraform/helper/schema/field_reader_multi.go | 63
-rw-r--r-- vendor/github.com/hashicorp/terraform/helper/schema/field_writer.go | 8
-rw-r--r-- vendor/github.com/hashicorp/terraform/helper/schema/field_writer_map.go | 319
-rw-r--r-- vendor/github.com/hashicorp/terraform/helper/schema/getsource_string.go | 36
-rw-r--r-- vendor/github.com/hashicorp/terraform/helper/schema/provider.go | 400
-rw-r--r-- vendor/github.com/hashicorp/terraform/helper/schema/provisioner.go | 180
-rw-r--r-- vendor/github.com/hashicorp/terraform/helper/schema/resource.go | 478
-rw-r--r-- vendor/github.com/hashicorp/terraform/helper/schema/resource_data.go | 502
-rw-r--r-- vendor/github.com/hashicorp/terraform/helper/schema/resource_data_get_source.go | 17
-rw-r--r-- vendor/github.com/hashicorp/terraform/helper/schema/resource_importer.go | 52
-rw-r--r-- vendor/github.com/hashicorp/terraform/helper/schema/resource_timeout.go | 237
-rw-r--r-- vendor/github.com/hashicorp/terraform/helper/schema/schema.go | 1537
-rw-r--r-- vendor/github.com/hashicorp/terraform/helper/schema/serialize.go | 125
-rw-r--r-- vendor/github.com/hashicorp/terraform/helper/schema/set.go | 209
-rw-r--r-- vendor/github.com/hashicorp/terraform/helper/schema/testing.go | 30
-rw-r--r-- vendor/github.com/hashicorp/terraform/helper/schema/valuetype.go | 21
-rw-r--r-- vendor/github.com/hashicorp/terraform/helper/schema/valuetype_string.go | 16
-rw-r--r-- vendor/github.com/hashicorp/terraform/helper/shadow/closer.go | 80
-rw-r--r-- vendor/github.com/hashicorp/terraform/helper/shadow/compared_value.go | 128
-rw-r--r-- vendor/github.com/hashicorp/terraform/helper/shadow/keyed_value.go | 151
-rw-r--r-- vendor/github.com/hashicorp/terraform/helper/shadow/ordered_value.go | 66
-rw-r--r-- vendor/github.com/hashicorp/terraform/helper/shadow/value.go | 79
-rw-r--r-- vendor/github.com/hashicorp/terraform/helper/structure/expand_json.go | 11
-rw-r--r-- vendor/github.com/hashicorp/terraform/helper/structure/flatten_json.go | 16
-rw-r--r-- vendor/github.com/hashicorp/terraform/helper/structure/normalize_json.go | 24
-rw-r--r-- vendor/github.com/hashicorp/terraform/helper/structure/suppress_json_diff.go | 21
-rw-r--r-- vendor/github.com/hashicorp/terraform/helper/validation/validation.go | 108
-rw-r--r-- vendor/github.com/hashicorp/terraform/plugin/plugin.go | 13
-rw-r--r-- vendor/github.com/hashicorp/terraform/plugin/resource_provider.go | 578
-rw-r--r-- vendor/github.com/hashicorp/terraform/plugin/resource_provisioner.go | 173
-rw-r--r-- vendor/github.com/hashicorp/terraform/plugin/serve.go | 54
-rw-r--r-- vendor/github.com/hashicorp/terraform/plugin/ui_input.go | 51
-rw-r--r-- vendor/github.com/hashicorp/terraform/plugin/ui_output.go | 29
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/context.go | 1022
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/context_components.go | 65
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/context_graph_type.go | 32
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/context_import.go | 77
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/debug.go | 523
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/diff.go | 866
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/edge_destroy.go | 17
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/eval.go | 63
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/eval_apply.go | 359
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/eval_check_prevent_destroy.go | 38
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/eval_context.go | 84
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/eval_context_builtin.go | 347
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/eval_context_mock.go | 208
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/eval_count.go | 58
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/eval_count_boundary.go | 78
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/eval_count_computed.go | 25
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/eval_diff.go | 478
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/eval_error.go | 20
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/eval_filter.go | 25
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/eval_filter_operation.go | 49
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/eval_if.go | 26
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/eval_import_state.go | 76
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/eval_interpolate.go | 24
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/eval_noop.go | 8
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/eval_output.go | 119
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/eval_provider.go | 164
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/eval_provisioner.go | 47
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/eval_read_data.go | 139
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/eval_refresh.go | 55
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/eval_resource.go | 13
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/eval_sequence.go | 27
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/eval_state.go | 324
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/eval_validate.go | 227
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/eval_validate_selfref.go | 74
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/eval_variable.go | 279
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/evaltree_provider.go | 119
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/graph.go | 172
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/graph_builder.go | 77
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/graph_builder_apply.go | 141
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/graph_builder_destroy_plan.go | 67
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/graph_builder_import.go | 76
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/graph_builder_input.go | 27
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/graph_builder_plan.go | 164
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/graph_builder_refresh.go | 132
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/graph_builder_validate.go | 36
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/graph_dot.go | 9
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/graph_interface_subgraph.go | 7
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/graph_walk.go | 60
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/graph_walk_context.go | 157
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/graph_walk_operation.go | 18
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/graphtype_string.go | 16
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/hook.go | 137
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/hook_mock.go | 245
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/hook_stop.go | 87
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/instancetype.go | 13
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/instancetype_string.go | 16
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/interpolate.go | 782
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/node_count_boundary.go | 14
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/node_data_destroy.go | 22
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/node_data_refresh.go | 198
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/node_module_destroy.go | 29
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/node_module_variable.go | 125
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/node_output.go | 76
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/node_output_orphan.go | 35
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/node_provider.go | 11
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/node_provider_abstract.go | 85
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/node_provider_disabled.go | 38
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/node_provisioner.go | 44
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract.go | 240
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract_count.go | 50
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/node_resource_apply.go | 357
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/node_resource_destroy.go | 288
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/node_resource_plan.go | 83
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_destroy.go | 53
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_instance.go | 190
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_orphan.go | 54
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/node_resource_refresh.go | 100
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/node_resource_validate.go | 158
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/node_root_variable.go | 22
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/path.go | 24
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/plan.go | 153
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/resource.go | 360
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/resource_address.go | 301
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/resource_provider.go | 204
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/resource_provider_mock.go | 297
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/resource_provisioner.go | 54
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/resource_provisioner_mock.go | 72
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/semantics.go | 132
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/shadow.go | 28
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/shadow_components.go | 273
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/shadow_context.go | 158
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/shadow_resource_provider.go | 815
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/shadow_resource_provisioner.go | 282
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/state.go | 2118
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/state_add.go | 374
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/state_filter.go | 267
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/state_upgrade_v1_to_v2.go | 189
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/state_upgrade_v2_to_v3.go | 142
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/state_v1.go | 145
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/testing.go | 19
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/transform.go | 52
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_provider.go | 80
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_resource.go | 78
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/transform_attach_state.go | 68
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/transform_config.go | 135
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/transform_config_flat.go | 80
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/transform_config_old.go | 23
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/transform_count_boundary.go | 28
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/transform_deposed.go | 168
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/transform_destroy_cbd.go | 257
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/transform_destroy_edge.go | 269
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/transform_diff.go | 86
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/transform_expand.go | 48
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/transform_import_provider.go | 38
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/transform_import_state.go | 241
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/transform_module_variable.go | 120
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/transform_orphan_count.go | 110
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/transform_orphan_output.go | 64
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/transform_orphan_resource.go | 78
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/transform_output.go | 59
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/transform_provider.go | 380
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/transform_provider_disable.go | 50
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/transform_provisioner.go | 206
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/transform_reference.go | 321
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/transform_resource_count.go | 51
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/transform_root.go | 38
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/transform_state.go | 65
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/transform_targets.go | 144
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/transform_transitive_reduction.go | 20
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/transform_variable.go | 40
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/transform_vertex.go | 44
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/ui_input.go | 26
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/ui_input_mock.go | 23
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/ui_input_prefix.go | 19
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/ui_output.go | 7
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/ui_output_callback.go | 9
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/ui_output_mock.go | 16
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/ui_output_provisioner.go | 15
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/util.go | 93
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/variables.go | 166
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/version.go | 31
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/version_required.go | 69
-rw-r--r-- vendor/github.com/hashicorp/terraform/terraform/walkoperation_string.go | 16
-rw-r--r-- vendor/github.com/hashicorp/yamux/LICENSE | 362
-rw-r--r-- vendor/github.com/hashicorp/yamux/README.md | 86
-rw-r--r-- vendor/github.com/hashicorp/yamux/addr.go | 60
-rw-r--r-- vendor/github.com/hashicorp/yamux/const.go | 157
-rw-r--r-- vendor/github.com/hashicorp/yamux/mux.go | 87
-rw-r--r-- vendor/github.com/hashicorp/yamux/session.go | 623
-rw-r--r-- vendor/github.com/hashicorp/yamux/spec.md | 140
-rw-r--r-- vendor/github.com/hashicorp/yamux/stream.go | 457
-rw-r--r-- vendor/github.com/hashicorp/yamux/util.go | 28
-rw-r--r-- vendor/github.com/jmespath/go-jmespath/LICENSE | 13
-rw-r--r-- vendor/github.com/jmespath/go-jmespath/Makefile | 44
-rw-r--r-- vendor/github.com/jmespath/go-jmespath/README.md | 7
-rw-r--r-- vendor/github.com/jmespath/go-jmespath/api.go | 49
-rw-r--r-- vendor/github.com/jmespath/go-jmespath/astnodetype_string.go | 16
-rw-r--r-- vendor/github.com/jmespath/go-jmespath/functions.go | 842
-rw-r--r-- vendor/github.com/jmespath/go-jmespath/interpreter.go | 418
-rw-r--r-- vendor/github.com/jmespath/go-jmespath/lexer.go | 420
-rw-r--r-- vendor/github.com/jmespath/go-jmespath/parser.go | 603
-rw-r--r-- vendor/github.com/jmespath/go-jmespath/toktype_string.go | 16
-rw-r--r-- vendor/github.com/jmespath/go-jmespath/util.go | 185
-rw-r--r-- vendor/github.com/mitchellh/copystructure/LICENSE | 21
-rw-r--r-- vendor/github.com/mitchellh/copystructure/README.md | 21
-rw-r--r-- vendor/github.com/mitchellh/copystructure/copier_time.go | 15
-rw-r--r-- vendor/github.com/mitchellh/copystructure/copystructure.go | 477
-rw-r--r-- vendor/github.com/mitchellh/go-homedir/LICENSE | 21
-rw-r--r-- vendor/github.com/mitchellh/go-homedir/README.md | 14
-rw-r--r-- vendor/github.com/mitchellh/go-homedir/homedir.go | 137
-rw-r--r-- vendor/github.com/mitchellh/hashstructure/LICENSE | 21
-rw-r--r-- vendor/github.com/mitchellh/hashstructure/README.md | 61
-rw-r--r-- vendor/github.com/mitchellh/hashstructure/hashstructure.go | 323
-rw-r--r-- vendor/github.com/mitchellh/hashstructure/include.go | 15
-rw-r--r-- vendor/github.com/mitchellh/mapstructure/LICENSE | 21
-rw-r--r-- vendor/github.com/mitchellh/mapstructure/README.md | 46
-rw-r--r-- vendor/github.com/mitchellh/mapstructure/decode_hooks.go | 154
-rw-r--r-- vendor/github.com/mitchellh/mapstructure/error.go | 50
-rw-r--r-- vendor/github.com/mitchellh/mapstructure/mapstructure.go | 823
-rw-r--r-- vendor/github.com/mitchellh/reflectwalk/LICENSE | 21
-rw-r--r-- vendor/github.com/mitchellh/reflectwalk/README.md | 6
-rw-r--r-- vendor/github.com/mitchellh/reflectwalk/location.go | 17
-rw-r--r-- vendor/github.com/mitchellh/reflectwalk/location_string.go | 16
-rw-r--r-- vendor/github.com/mitchellh/reflectwalk/reflectwalk.go | 339
-rw-r--r-- vendor/github.com/satori/go.uuid/LICENSE | 20
-rw-r--r-- vendor/github.com/satori/go.uuid/README.md | 65
-rw-r--r-- vendor/github.com/satori/go.uuid/uuid.go | 481
-rw-r--r-- vendor/golang.org/x/crypto/LICENSE | 27
-rw-r--r-- vendor/golang.org/x/crypto/PATENTS | 22
-rw-r--r-- vendor/golang.org/x/crypto/curve25519/const_amd64.h | 8
-rw-r--r-- vendor/golang.org/x/crypto/curve25519/const_amd64.s | 20
-rw-r--r-- vendor/golang.org/x/crypto/curve25519/cswap_amd64.s | 88
-rw-r--r-- vendor/golang.org/x/crypto/curve25519/curve25519.go | 841
-rw-r--r-- vendor/golang.org/x/crypto/curve25519/doc.go | 23
-rw-r--r-- vendor/golang.org/x/crypto/curve25519/freeze_amd64.s | 73
-rw-r--r-- vendor/golang.org/x/crypto/curve25519/ladderstep_amd64.s | 1377
-rw-r--r-- vendor/golang.org/x/crypto/curve25519/mont25519_amd64.go | 240
-rw-r--r-- vendor/golang.org/x/crypto/curve25519/mul_amd64.s | 169
-rw-r--r-- vendor/golang.org/x/crypto/curve25519/square_amd64.s | 132
-rw-r--r-- vendor/golang.org/x/crypto/ed25519/ed25519.go | 181
-rw-r--r-- vendor/golang.org/x/crypto/ed25519/internal/edwards25519/const.go | 1422
-rw-r--r-- vendor/golang.org/x/crypto/ed25519/internal/edwards25519/edwards25519.go | 1771
-rw-r--r-- vendor/golang.org/x/crypto/ssh/buffer.go | 98
-rw-r--r-- vendor/golang.org/x/crypto/ssh/certs.go | 503
-rw-r--r-- vendor/golang.org/x/crypto/ssh/channel.go | 633
-rw-r--r-- vendor/golang.org/x/crypto/ssh/cipher.go | 627
-rw-r--r-- vendor/golang.org/x/crypto/ssh/client.go | 211
-rw-r--r-- vendor/golang.org/x/crypto/ssh/client_auth.go | 475
-rw-r--r-- vendor/golang.org/x/crypto/ssh/common.go | 371
-rw-r--r-- vendor/golang.org/x/crypto/ssh/connection.go | 143
-rw-r--r-- vendor/golang.org/x/crypto/ssh/doc.go | 18
-rw-r--r-- vendor/golang.org/x/crypto/ssh/handshake.go | 625
-rw-r--r-- vendor/golang.org/x/crypto/ssh/kex.go | 540
-rw-r--r-- vendor/golang.org/x/crypto/ssh/keys.go | 905
-rw-r--r-- vendor/golang.org/x/crypto/ssh/mac.go | 61
-rw-r--r-- vendor/golang.org/x/crypto/ssh/messages.go | 758
-rw-r--r-- vendor/golang.org/x/crypto/ssh/mux.go | 330
-rw-r--r-- vendor/golang.org/x/crypto/ssh/server.go | 491
-rw-r--r-- vendor/golang.org/x/crypto/ssh/session.go | 627
-rw-r--r-- vendor/golang.org/x/crypto/ssh/tcpip.go | 407
-rw-r--r-- vendor/golang.org/x/crypto/ssh/transport.go | 375
-rw-r--r-- vendor/vendor.json | 538
575 files changed, 124826 insertions, 0 deletions
diff --git a/vendor/github.com/apparentlymart/go-cidr/LICENSE b/vendor/github.com/apparentlymart/go-cidr/LICENSE
new file mode 100644
index 0000000..2125378
--- /dev/null
+++ b/vendor/github.com/apparentlymart/go-cidr/LICENSE
@@ -0,0 +1,19 @@
+Copyright (c) 2015 Martin Atkins
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/apparentlymart/go-cidr/cidr/cidr.go b/vendor/github.com/apparentlymart/go-cidr/cidr/cidr.go
new file mode 100644
index 0000000..a31cdec
--- /dev/null
+++ b/vendor/github.com/apparentlymart/go-cidr/cidr/cidr.go
@@ -0,0 +1,112 @@
+// Package cidr is a collection of assorted utilities for computing
+// network and host addresses within network ranges.
+//
+// It expects a CIDR-type address structure where addresses are divided into
+// some number of prefix bits representing the network and then the remaining
+// suffix bits represent the host.
+//
+// For example, it can help to calculate addresses for sub-networks of a
+// parent network, or to calculate host addresses within a particular prefix.
+//
+// At present this package is prioritizing simplicity of implementation and
+// de-prioritizing speed and memory usage. Thus caution is advised before
+// using this package in performance-critical applications or hot codepaths.
+// Patches to improve the speed and memory usage may be accepted as long as
+// they do not result in a significant increase in code complexity.
+package cidr
+
+import (
+	"fmt"
+	"math/big"
+	"net"
+)
+
+// Subnet takes a parent CIDR range and creates a subnet within it
+// with the given number of additional prefix bits and the given
+// network number.
+//
+// For example, 10.3.0.0/16, extended by 8 bits, with a network number
+// of 5, becomes 10.3.5.0/24 .
+func Subnet(base *net.IPNet, newBits int, num int) (*net.IPNet, error) {
+	ip := base.IP
+	mask := base.Mask
+
+	parentLen, addrLen := mask.Size()
+	newPrefixLen := parentLen + newBits
+
+	if newPrefixLen > addrLen {
+		return nil, fmt.Errorf("insufficient address space to extend prefix of %d by %d", parentLen, newBits)
+	}
+
+	maxNetNum := uint64(1<<uint64(newBits)) - 1
+	if uint64(num) > maxNetNum {
+		return nil, fmt.Errorf("prefix extension of %d does not accommodate a subnet numbered %d", newBits, num)
+	}
+
+	return &net.IPNet{
+		IP:   insertNumIntoIP(ip, num, newPrefixLen),
+		Mask: net.CIDRMask(newPrefixLen, addrLen),
+	}, nil
+}
+
+// Host takes a parent CIDR range and turns it into a host IP address with
+// the given host number.
+//
+// For example, 10.3.0.0/16 with a host number of 2 gives 10.3.0.2.
+func Host(base *net.IPNet, num int) (net.IP, error) {
+	ip := base.IP
+	mask := base.Mask
+
+	parentLen, addrLen := mask.Size()
+	hostLen := addrLen - parentLen
+
+	maxHostNum := uint64(1<<uint64(hostLen)) - 1
+
+	numUint64 := uint64(num)
+	if num < 0 {
+		numUint64 = uint64(-num) - 1
+		num = int(maxHostNum - numUint64)
+	}
+
+	if numUint64 > maxHostNum {
+		return nil, fmt.Errorf("prefix of %d does not accommodate a host numbered %d", parentLen, num)
+	}
+
+	return insertNumIntoIP(ip, num, 32), nil
+}
+
+// AddressRange returns the first and last addresses in the given CIDR range.
+func AddressRange(network *net.IPNet) (net.IP, net.IP) {
+	// the first IP is easy
+	firstIP := network.IP
+
+	// the last IP is the network address OR NOT the mask address
+	prefixLen, bits := network.Mask.Size()
+	if prefixLen == bits {
+		// Easy!
+		// But make sure that our two slices are distinct, since they
+		// would be in all other cases.
+		lastIP := make([]byte, len(firstIP))
+		copy(lastIP, firstIP)
+		return firstIP, lastIP
+	}
+
+	firstIPInt, bits := ipToInt(firstIP)
+	hostLen := uint(bits) - uint(prefixLen)
+	lastIPInt := big.NewInt(1)
+	lastIPInt.Lsh(lastIPInt, hostLen)
+	lastIPInt.Sub(lastIPInt, big.NewInt(1))
+	lastIPInt.Or(lastIPInt, firstIPInt)
+
+	return firstIP, intToIP(lastIPInt, bits)
+}
+
+// AddressCount returns the number of distinct host addresses within the given
+// CIDR range.
+//
+// Since the result is a uint64, this function returns meaningful information
+// only for IPv4 ranges and IPv6 ranges with a prefix size of at least 65.
+func AddressCount(network *net.IPNet) uint64 {
+	prefixLen, bits := network.Mask.Size()
+	return 1 << (uint64(bits) - uint64(prefixLen))
+}
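
For orientation, here is a minimal usage sketch of the exported functions above (not part of the committed file; the import path is the vendored one and the values mirror the doc-comment examples):

package main

import (
	"fmt"
	"net"

	"github.com/apparentlymart/go-cidr/cidr"
)

func main() {
	// Extend 10.3.0.0/16 by 8 prefix bits and take network number 5,
	// as in the Subnet doc comment: the result is 10.3.5.0/24.
	_, base, _ := net.ParseCIDR("10.3.0.0/16")
	subnet, err := cidr.Subnet(base, 8, 5)
	if err != nil {
		panic(err)
	}

	// Host number 2 within that subnet: 10.3.5.2.
	host, err := cidr.Host(subnet, 2)
	if err != nil {
		panic(err)
	}

	first, last := cidr.AddressRange(subnet) // 10.3.5.0 and 10.3.5.255
	fmt.Println(subnet, host, first, last, cidr.AddressCount(subnet))
	// Prints: 10.3.5.0/24 10.3.5.2 10.3.5.0 10.3.5.255 256
}
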
diff --git a/vendor/github.com/apparentlymart/go-cidr/cidr/wrangling.go b/vendor/github.com/apparentlymart/go-cidr/cidr/wrangling.go
new file mode 100644
index 0000000..861a5f6
--- /dev/null
+++ b/vendor/github.com/apparentlymart/go-cidr/cidr/wrangling.go
@@ -0,0 +1,38 @@
+package cidr
+
+import (
+	"fmt"
+	"math/big"
+	"net"
+)
+
+func ipToInt(ip net.IP) (*big.Int, int) {
+	val := &big.Int{}
+	val.SetBytes([]byte(ip))
+	if len(ip) == net.IPv4len {
+		return val, 32
+	} else if len(ip) == net.IPv6len {
+		return val, 128
+	} else {
+		panic(fmt.Errorf("Unsupported address length %d", len(ip)))
+	}
+}
+
+func intToIP(ipInt *big.Int, bits int) net.IP {
+	ipBytes := ipInt.Bytes()
+	ret := make([]byte, bits/8)
+	// Pack our IP bytes into the end of the return array,
+	// since big.Int.Bytes() removes front zero padding.
+	for i := 1; i <= len(ipBytes); i++ {
+		ret[len(ret)-i] = ipBytes[len(ipBytes)-i]
+	}
+	return net.IP(ret)
+}
+
+func insertNumIntoIP(ip net.IP, num int, prefixLen int) net.IP {
+	ipInt, totalBits := ipToInt(ip)
+	bigNum := big.NewInt(int64(num))
+	bigNum.Lsh(bigNum, uint(totalBits-prefixLen))
+	ipInt.Or(ipInt, bigNum)
+	return intToIP(ipInt, totalBits)
+}
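
To make the bit manipulation above concrete, a standalone trace of insertNumIntoIP(10.3.0.0, 5, 24). This is illustrative only: the vendored helpers are unexported, so the same big.Int steps are re-created here, with FillBytes standing in for intToIP's right-alignment loop:

package main

import (
	"fmt"
	"math/big"
	"net"
)

func main() {
	ip := net.ParseIP("10.3.0.0").To4()
	ipInt := new(big.Int).SetBytes(ip) // 0x0A030000

	num := big.NewInt(5)
	num.Lsh(num, uint(32-24)) // shift the network number below the new /24 prefix: 0x0500
	ipInt.Or(ipInt, num)      // 0x0A030500

	out := make([]byte, 4) // right-aligned 4-byte form, as intToIP produces
	ipInt.FillBytes(out)
	fmt.Println(net.IP(out)) // 10.3.5.0
}
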
diff --git a/vendor/github.com/aws/aws-sdk-go/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go/LICENSE.txt
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/LICENSE.txt
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!) The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/aws/aws-sdk-go/NOTICE.txt b/vendor/github.com/aws/aws-sdk-go/NOTICE.txt
new file mode 100644
index 0000000..5f14d11
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/NOTICE.txt
@@ -0,0 +1,3 @@
1AWS SDK for Go
2Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
3Copyright 2014-2015 Stripe, Inc.
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go b/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go
new file mode 100644
index 0000000..56fdfc2
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go
@@ -0,0 +1,145 @@
1// Package awserr represents API error interface accessors for the SDK.
2package awserr
3
4// An Error wraps lower level errors with code, message and an original error.
5// The underlying concrete error type may also satisfy other interfaces which
6// can be used to obtain more specific information about the error.
7//
8// Calling Error() or String() will always include the full information about
9// an error based on its underlying type.
10//
11// Example:
12//
13// output, err := s3manage.Upload(svc, input, opts)
14// if err != nil {
15// if awsErr, ok := err.(awserr.Error); ok {
16// // Get error details
17// log.Println("Error:", awsErr.Code(), awsErr.Message())
18//
19// // Prints out full error message, including original error if there was one.
20// log.Println("Error:", awsErr.Error())
21//
22// // Get original error
23// if origErr := awsErr.OrigErr(); origErr != nil {
24// // operate on original error.
25// }
26// } else {
27// fmt.Println(err.Error())
28// }
29// }
30//
31type Error interface {
32 // Satisfy the generic error interface.
33 error
34
35 // Returns the short phrase depicting the classification of the error.
36 Code() string
37
38 // Returns the error details message.
39 Message() string
40
41 // Returns the original error if one was set. Nil is returned if not set.
42 OrigErr() error
43}
44
45// BatchError is a batch of errors which also wraps lower level errors with
46// code, message, and original errors. Calling Error() will include all errors
47// that occurred in the batch.
48//
49// Deprecated: Replaced with BatchedErrors. Only defined for backwards
50// compatibility.
51type BatchError interface {
52 // Satisfy the generic error interface.
53 error
54
55 // Returns the short phrase depicting the classification of the error.
56 Code() string
57
58 // Returns the error details message.
59 Message() string
60
61	// Returns the original errors if any were set. Nil is returned if not set.
62 OrigErrs() []error
63}
64
65// BatchedErrors is a batch of errors which also wraps lower level errors with
66// code, message, and original errors. Calling Error() will include all errors
67// that occurred in the batch.
68//
69// Replaces BatchError
70type BatchedErrors interface {
71 // Satisfy the base Error interface.
72 Error
73
74	// Returns the original errors if any were set. Nil is returned if not set.
75 OrigErrs() []error
76}
77
78// New returns an Error object described by the code, message, and origErr.
79//
80// If origErr is not nil it will be wrapped within the returned Error object
81// as its original error.
82func New(code, message string, origErr error) Error {
83 var errs []error
84 if origErr != nil {
85 errs = append(errs, origErr)
86 }
87 return newBaseError(code, message, errs)
88}
89
90// NewBatchError returns a BatchedErrors wrapping the provided collection of
91// errors.
92func NewBatchError(code, message string, errs []error) BatchedErrors {
93 return newBaseError(code, message, errs)
94}
95
96// A RequestFailure is an interface to extract request failure information from
97// an Error such as the request ID of the failed request returned by a service.
98// A RequestFailure may not always have a requestID value, such as when the
99// request failed prior to reaching the service due to a connection error.
100//
101// Example:
102//
103// output, err := s3manage.Upload(svc, input, opts)
104// if err != nil {
105// if reqerr, ok := err.(RequestFailure); ok {
106// log.Println("Request failed", reqerr.Code(), reqerr.Message(), reqerr.RequestID())
107// } else {
108// log.Println("Error:", err.Error())
109// }
110// }
111//
112// Combined with awserr.Error:
113//
114// output, err := s3manage.Upload(svc, input, opts)
115// if err != nil {
116// if awsErr, ok := err.(awserr.Error); ok {
117// // Generic AWS Error with Code, Message, and original error (if any)
118// fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
119//
120// if reqErr, ok := err.(awserr.RequestFailure); ok {
121// // A service error occurred
122// fmt.Println(reqErr.StatusCode(), reqErr.RequestID())
123// }
124// } else {
125// fmt.Println(err.Error())
126// }
127// }
128//
129type RequestFailure interface {
130 Error
131
132 // The status code of the HTTP response.
133 StatusCode() int
134
135 // The request ID returned by the service for a request failure. This will
136 // be empty if no request ID is available such as the request failed due
137 // to a connection error.
138 RequestID() string
139}
140
141// NewRequestFailure returns a new request error wrapper for the given Error
142// provided.
143func NewRequestFailure(err Error, statusCode int, reqID string) RequestFailure {
144 return newRequestError(err, statusCode, reqID)
145}
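
// A minimal usage sketch (not part of the vendored file; the error code,
// message, status code, and request ID below are hypothetical):
//
//	var err error = awserr.NewRequestFailure(
//		awserr.New("ResourceNotFound", "resource does not exist", nil),
//		404, "req-abc-123")
//	if rf, ok := err.(awserr.RequestFailure); ok {
//		fmt.Println(rf.Code(), rf.StatusCode(), rf.RequestID())
//		// Prints: ResourceNotFound 404 req-abc-123
//	}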
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go b/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go
new file mode 100644
index 0000000..0202a00
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go
@@ -0,0 +1,194 @@
1package awserr
2
3import "fmt"
4
5// SprintError returns a string of the formatted error code.
6//
7// Both extra and origErr are optional; their lines are appended to the
8// message only when they are provided.
9func SprintError(code, message, extra string, origErr error) string {
10 msg := fmt.Sprintf("%s: %s", code, message)
11 if extra != "" {
12 msg = fmt.Sprintf("%s\n\t%s", msg, extra)
13 }
14 if origErr != nil {
15 msg = fmt.Sprintf("%s\ncaused by: %s", msg, origErr.Error())
16 }
17 return msg
18}
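
// A sketch of the resulting format (values hypothetical): a call such as
//
//	SprintError("AccessDenied", "not authorized", "", errors.New("token expired"))
//
// renders as:
//
//	AccessDenied: not authorized
//	caused by: token expired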
19
20// A baseError wraps the code and message that define an error. It can also
21// be used to wrap an original error object.
22//
23// Should be used as the root for errors satisfying the awserr.Error interface,
24// and for any error which does not fit into a specific error wrapper type.
25type baseError struct {
26 // Classification of error
27 code string
28
29 // Detailed information about error
30 message string
31
32	// Optional original error this error is based on. Allows building
33 // chained errors.
34 errs []error
35}
36
37// newBaseError returns an error object for the code, message, and errors.
38//
39// code is a short, whitespace-free phrase depicting the classification of the
40// error that is being created.
41//
42// message is a free-form string containing detailed information about the
43// error.
44//
45// origErrs is the list of error objects which will be nested under the new
46// error to be returned.
47func newBaseError(code, message string, origErrs []error) *baseError {
48 b := &baseError{
49 code: code,
50 message: message,
51 errs: origErrs,
52 }
53
54 return b
55}
56
57// Error returns the string representation of the error.
58//
59// See ErrorWithExtra for formatting.
60//
61// Satisfies the error interface.
62func (b baseError) Error() string {
63 size := len(b.errs)
64 if size > 0 {
65 return SprintError(b.code, b.message, "", errorList(b.errs))
66 }
67
68 return SprintError(b.code, b.message, "", nil)
69}
70
71// String returns the string representation of the error.
72// Alias for Error to satisfy the stringer interface.
73func (b baseError) String() string {
74 return b.Error()
75}
76
77// Code returns the short phrase depicting the classification of the error.
78func (b baseError) Code() string {
79 return b.code
80}
81
82// Message returns the error details message.
83func (b baseError) Message() string {
84 return b.message
85}
86
87// OrigErr returns the original error if one was set. Nil is returned if no
88// error was set. This only returns the first element in the list. If the full
89// list is needed, use BatchedErrors.
90func (b baseError) OrigErr() error {
91 switch len(b.errs) {
92 case 0:
93 return nil
94 case 1:
95 return b.errs[0]
96 default:
97 if err, ok := b.errs[0].(Error); ok {
98 return NewBatchError(err.Code(), err.Message(), b.errs[1:])
99 }
100 return NewBatchError("BatchedErrors",
101 "multiple errors occurred", b.errs)
102 }
103}
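
// A sketch of the fan-out above (errors hypothetical): given
//
//	b := newBaseError("Code", "msg", []error{e1, e2})
//
// b.OrigErr() returns a BatchedErrors. When e1 is an awserr.Error, the batch
// takes e1's code and message and wraps only e2; otherwise the batch is coded
// "BatchedErrors" and wraps both e1 and e2.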
104
105// OrigErrs returns the original errors if any were set. An empty slice is
106// returned if no errors were set.
107func (b baseError) OrigErrs() []error {
108 return b.errs
109}
110
111// So that the Error interface type can be included as an anonymous field
112// in the requestError struct and not conflict with the error.Error() method.
113type awsError Error
114
115// A requestError wraps a request or service error.
116//
117// Composed of baseError for code, message, and original error.
118type requestError struct {
119 awsError
120 statusCode int
121 requestID string
122}
123
124// newRequestError returns a wrapped error with additional information for
125// request status code, and service requestID.
126//
127// Should be used to wrap all errors which involve service requests, even if
128// the request failed without a service response but had an HTTP status code
129// that may be meaningful.
130//
131// Also wraps original errors via the baseError.
132func newRequestError(err Error, statusCode int, requestID string) *requestError {
133 return &requestError{
134 awsError: err,
135 statusCode: statusCode,
136 requestID: requestID,
137 }
138}
139
140// Error returns the string representation of the error.
141// Satisfies the error interface.
142func (r requestError) Error() string {
143 extra := fmt.Sprintf("status code: %d, request id: %s",
144 r.statusCode, r.requestID)
145 return SprintError(r.Code(), r.Message(), extra, r.OrigErr())
146}
147
148// String returns the string representation of the error.
149// Alias for Error to satisfy the stringer interface.
150func (r requestError) String() string {
151 return r.Error()
152}
153
154// StatusCode returns the wrapped status code for the error
155func (r requestError) StatusCode() int {
156 return r.statusCode
157}
158
159// RequestID returns the wrapped requestID
160func (r requestError) RequestID() string {
161 return r.requestID
162}
163
164// OrigErrs returns the original errors if any were set. An empty slice is
165// returned if no errors were set.
166func (r requestError) OrigErrs() []error {
167 if b, ok := r.awsError.(BatchedErrors); ok {
168 return b.OrigErrs()
169 }
170 return []error{r.OrigErr()}
171}
172
173// An error list that satisfies the standard error interface.
174type errorList []error
175
176// Error returns the string representation of the error.
177//
178// Satisfies the error interface.
179func (e errorList) Error() string {
180 msg := ""
181	// A zero-length list produces an empty message.
182 if size := len(e); size > 0 {
183 for i := 0; i < size; i++ {
184			msg += e[i].Error()
185			// Check whether the next index is within the slice; if it is,
186			// append a newline. A trailing '\n' would otherwise break unit
187			// tests.
188 if i+1 < size {
189 msg += "\n"
190 }
191 }
192 }
193 return msg
194}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go
new file mode 100644
index 0000000..1a3d106
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go
@@ -0,0 +1,108 @@
1package awsutil
2
3import (
4 "io"
5 "reflect"
6 "time"
7)
8
9// Copy deeply copies a src structure to dst. Useful for copying request and
10// response structures.
11//
12// Can copy between structs of different types, but will only copy fields
13// which are assignable and exist in both structs; all other fields are
14// ignored.
15func Copy(dst, src interface{}) {
16 dstval := reflect.ValueOf(dst)
17 if !dstval.IsValid() {
18 panic("Copy dst cannot be nil")
19 }
20
21 rcopy(dstval, reflect.ValueOf(src), true)
22}
23
24// CopyOf returns a copy of src while also allocating the memory for dst.
25// src must be a pointer type or this operation will panic.
26func CopyOf(src interface{}) (dst interface{}) {
27 dsti := reflect.New(reflect.TypeOf(src).Elem())
28 dst = dsti.Interface()
29 rcopy(dsti, reflect.ValueOf(src), true)
30 return
31}
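
// A minimal usage sketch (the struct type is hypothetical):
//
//	type Input struct{ Name *string }
//	name := "original"
//	src := &Input{Name: &name}
//	dst := CopyOf(src).(*Input)
//	// *dst.Name == "original", but dst.Name != src.Name: the pointer target
//	// was deep-copied, so mutating *src.Name does not affect *dst.Name.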
32
33// rcopy performs a recursive copy of values from the source to destination.
34//
35// root is used to skip certain aspects of the copy which are not valid
36// for the root node of an object.
37func rcopy(dst, src reflect.Value, root bool) {
38 if !src.IsValid() {
39 return
40 }
41
42 switch src.Kind() {
43 case reflect.Ptr:
44 if _, ok := src.Interface().(io.Reader); ok {
45 if dst.Kind() == reflect.Ptr && dst.Elem().CanSet() {
46 dst.Elem().Set(src)
47 } else if dst.CanSet() {
48 dst.Set(src)
49 }
50 } else {
51 e := src.Type().Elem()
52 if dst.CanSet() && !src.IsNil() {
53 if _, ok := src.Interface().(*time.Time); !ok {
54 dst.Set(reflect.New(e))
55 } else {
56 tempValue := reflect.New(e)
57 tempValue.Elem().Set(src.Elem())
58 // Sets time.Time's unexported values
59 dst.Set(tempValue)
60 }
61 }
62 if src.Elem().IsValid() {
63 // Keep the current root state since the depth hasn't changed
64 rcopy(dst.Elem(), src.Elem(), root)
65 }
66 }
67 case reflect.Struct:
68 t := dst.Type()
69 for i := 0; i < t.NumField(); i++ {
70 name := t.Field(i).Name
71 srcVal := src.FieldByName(name)
72 dstVal := dst.FieldByName(name)
73 if srcVal.IsValid() && dstVal.CanSet() {
74 rcopy(dstVal, srcVal, false)
75 }
76 }
77 case reflect.Slice:
78 if src.IsNil() {
79 break
80 }
81
82 s := reflect.MakeSlice(src.Type(), src.Len(), src.Cap())
83 dst.Set(s)
84 for i := 0; i < src.Len(); i++ {
85 rcopy(dst.Index(i), src.Index(i), false)
86 }
87 case reflect.Map:
88 if src.IsNil() {
89 break
90 }
91
92 s := reflect.MakeMap(src.Type())
93 dst.Set(s)
94 for _, k := range src.MapKeys() {
95 v := src.MapIndex(k)
96 v2 := reflect.New(v.Type()).Elem()
97 rcopy(v2, v, false)
98 dst.SetMapIndex(k, v2)
99 }
100 default:
101 // Assign the value if possible. If its not assignable, the value would
102 // need to be converted and the impact of that may be unexpected, or is
103 // not compatible with the dst type.
104 if src.Type().AssignableTo(dst.Type()) {
105 dst.Set(src)
106 }
107 }
108}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go
new file mode 100644
index 0000000..59fa4a5
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go
@@ -0,0 +1,27 @@
1package awsutil
2
3import (
4 "reflect"
5)
6
7// DeepEqual returns whether the two values are deeply equal, like reflect.DeepEqual.
8// In addition to this, this method will also dereference the input values if
9// possible so the DeepEqual performed will not fail if one parameter is a
10// pointer and the other is not.
11//
12// DeepEqual will not perform indirection of nested values of the input parameters.
13func DeepEqual(a, b interface{}) bool {
14 ra := reflect.Indirect(reflect.ValueOf(a))
15 rb := reflect.Indirect(reflect.ValueOf(b))
16
17 if raValid, rbValid := ra.IsValid(), rb.IsValid(); !raValid && !rbValid {
18		// If the elements are both nil and of the same type, they are equal.
19		// If they are of different types, they are not equal.
20 return reflect.TypeOf(a) == reflect.TypeOf(b)
21 } else if raValid != rbValid {
22 // Both values must be valid to be equal
23 return false
24 }
25
26 return reflect.DeepEqual(ra.Interface(), rb.Interface())
27}
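
// A minimal usage sketch: the indirection means a pointer and its value
// compare equal.
//
//	s := "hello"
//	DeepEqual(&s, "hello") // true
//	DeepEqual(&s, "world") // false
//	DeepEqual((*string)(nil), (*int)(nil)) // false: different types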
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go
new file mode 100644
index 0000000..11c52c3
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go
@@ -0,0 +1,222 @@
1package awsutil
2
3import (
4 "reflect"
5 "regexp"
6 "strconv"
7 "strings"
8
9 "github.com/jmespath/go-jmespath"
10)
11
12var indexRe = regexp.MustCompile(`(.+)\[(-?\d+)?\]$`)
13
14// rValuesAtPath returns a slice of values found in value v. The values
15// in v are explored recursively so all nested values are collected.
16func rValuesAtPath(v interface{}, path string, createPath, caseSensitive, nilTerm bool) []reflect.Value {
17 pathparts := strings.Split(path, "||")
18 if len(pathparts) > 1 {
19 for _, pathpart := range pathparts {
20 vals := rValuesAtPath(v, pathpart, createPath, caseSensitive, nilTerm)
21 if len(vals) > 0 {
22 return vals
23 }
24 }
25 return nil
26 }
27
28 values := []reflect.Value{reflect.Indirect(reflect.ValueOf(v))}
29 components := strings.Split(path, ".")
30 for len(values) > 0 && len(components) > 0 {
31 var index *int64
32 var indexStar bool
33 c := strings.TrimSpace(components[0])
34 if c == "" { // no actual component, illegal syntax
35 return nil
36 } else if caseSensitive && c != "*" && strings.ToLower(c[0:1]) == c[0:1] {
37 // TODO normalize case for user
38 return nil // don't support unexported fields
39 }
40
41 // parse this component
42 if m := indexRe.FindStringSubmatch(c); m != nil {
43 c = m[1]
44 if m[2] == "" {
45 index = nil
46 indexStar = true
47 } else {
48 i, _ := strconv.ParseInt(m[2], 10, 32)
49 index = &i
50 indexStar = false
51 }
52 }
53
54 nextvals := []reflect.Value{}
55 for _, value := range values {
56 // pull component name out of struct member
57 if value.Kind() != reflect.Struct {
58 continue
59 }
60
61 if c == "*" { // pull all members
62 for i := 0; i < value.NumField(); i++ {
63 if f := reflect.Indirect(value.Field(i)); f.IsValid() {
64 nextvals = append(nextvals, f)
65 }
66 }
67 continue
68 }
69
70 value = value.FieldByNameFunc(func(name string) bool {
71 if c == name {
72 return true
73 } else if !caseSensitive && strings.ToLower(name) == strings.ToLower(c) {
74 return true
75 }
76 return false
77 })
78
79 if nilTerm && value.Kind() == reflect.Ptr && len(components[1:]) == 0 {
80 if !value.IsNil() {
81 value.Set(reflect.Zero(value.Type()))
82 }
83 return []reflect.Value{value}
84 }
85
86 if createPath && value.Kind() == reflect.Ptr && value.IsNil() {
87 // TODO if the value is the terminus it should not be created
88 // if the value to be set to its position is nil.
89 value.Set(reflect.New(value.Type().Elem()))
90 value = value.Elem()
91 } else {
92 value = reflect.Indirect(value)
93 }
94
95 if value.Kind() == reflect.Slice || value.Kind() == reflect.Map {
96 if !createPath && value.IsNil() {
97 value = reflect.ValueOf(nil)
98 }
99 }
100
101 if value.IsValid() {
102 nextvals = append(nextvals, value)
103 }
104 }
105 values = nextvals
106
107 if indexStar || index != nil {
108 nextvals = []reflect.Value{}
109 for _, valItem := range values {
110 value := reflect.Indirect(valItem)
111 if value.Kind() != reflect.Slice {
112 continue
113 }
114
115 if indexStar { // grab all indices
116 for i := 0; i < value.Len(); i++ {
117 idx := reflect.Indirect(value.Index(i))
118 if idx.IsValid() {
119 nextvals = append(nextvals, idx)
120 }
121 }
122 continue
123 }
124
125 // pull out index
126 i := int(*index)
127 if i >= value.Len() { // check out of bounds
128 if createPath {
129 // TODO resize slice
130 } else {
131 continue
132 }
133 } else if i < 0 { // support negative indexing
134 i = value.Len() + i
135 }
136 value = reflect.Indirect(value.Index(i))
137
138 if value.Kind() == reflect.Slice || value.Kind() == reflect.Map {
139 if !createPath && value.IsNil() {
140 value = reflect.ValueOf(nil)
141 }
142 }
143
144 if value.IsValid() {
145 nextvals = append(nextvals, value)
146 }
147 }
148 values = nextvals
149 }
150
151 components = components[1:]
152 }
153 return values
154}
155
156// ValuesAtPath returns a list of values at the case-insensitive lexical
157// path inside a structure.
158func ValuesAtPath(i interface{}, path string) ([]interface{}, error) {
159 result, err := jmespath.Search(path, i)
160 if err != nil {
161 return nil, err
162 }
163
164 v := reflect.ValueOf(result)
165 if !v.IsValid() || (v.Kind() == reflect.Ptr && v.IsNil()) {
166 return nil, nil
167 }
168 if s, ok := result.([]interface{}); ok {
169 return s, err
170 }
171 if v.Kind() == reflect.Map && v.Len() == 0 {
172 return nil, nil
173 }
174 if v.Kind() == reflect.Slice {
175 out := make([]interface{}, v.Len())
176 for i := 0; i < v.Len(); i++ {
177 out[i] = v.Index(i).Interface()
178 }
179 return out, nil
180 }
181
182 return []interface{}{result}, nil
183}
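
// A minimal usage sketch (struct shape hypothetical). The path is evaluated
// with JMESPath, so projections like "[]" flatten nested results:
//
//	type Item struct{ Name string }
//	type Output struct{ Items []Item }
//	out := Output{Items: []Item{{Name: "a"}, {Name: "b"}}}
//	vals, err := ValuesAtPath(out, "Items[].Name")
//	// err == nil, vals == []interface{}{"a", "b"}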
184
185// SetValueAtPath sets a value at the case-insensitive lexical path inside
186// a structure.
187func SetValueAtPath(i interface{}, path string, v interface{}) {
188 if rvals := rValuesAtPath(i, path, true, false, v == nil); rvals != nil {
189 for _, rval := range rvals {
190 if rval.Kind() == reflect.Ptr && rval.IsNil() {
191 continue
192 }
193 setValue(rval, v)
194 }
195 }
196}
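
// A minimal usage sketch (struct shape hypothetical). Because rValuesAtPath
// is called with createPath=true, nil intermediate pointers are allocated:
//
//	type Inner struct{ Value *string }
//	type Outer struct{ Inner *Inner }
//	o := Outer{}
//	SetValueAtPath(&o, "Inner.Value", "x")
//	// o.Inner is now allocated and *o.Inner.Value == "x"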
197
198func setValue(dstVal reflect.Value, src interface{}) {
199 if dstVal.Kind() == reflect.Ptr {
200 dstVal = reflect.Indirect(dstVal)
201 }
202 srcVal := reflect.ValueOf(src)
203
204 if !srcVal.IsValid() { // src is literal nil
205 if dstVal.CanAddr() {
206 // Convert to pointer so that pointer's value can be nil'ed
207 // dstVal = dstVal.Addr()
208 }
209 dstVal.Set(reflect.Zero(dstVal.Type()))
210
211 } else if srcVal.Kind() == reflect.Ptr {
212 if srcVal.IsNil() {
213 srcVal = reflect.Zero(dstVal.Type())
214 } else {
215 srcVal = reflect.ValueOf(src).Elem()
216 }
217 dstVal.Set(srcVal)
218 } else {
219 dstVal.Set(srcVal)
220 }
221
222}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go
new file mode 100644
index 0000000..710eb43
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go
@@ -0,0 +1,113 @@
1package awsutil
2
3import (
4 "bytes"
5 "fmt"
6 "io"
7 "reflect"
8 "strings"
9)
10
11// Prettify returns the string representation of a value.
12func Prettify(i interface{}) string {
13 var buf bytes.Buffer
14 prettify(reflect.ValueOf(i), 0, &buf)
15 return buf.String()
16}
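
// A sketch of the rendered form (struct hypothetical):
//
//	type Object struct {
//		Name  *string
//		Count *int
//	}
//	name, count := "x", 2
//	fmt.Println(Prettify(Object{Name: &name, Count: &count}))
//	// {
//	//   Name: "x",
//	//   Count: 2
//	// }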
17
18// prettify will recursively walk value v to build a textual
19// representation of the value.
20func prettify(v reflect.Value, indent int, buf *bytes.Buffer) {
21 for v.Kind() == reflect.Ptr {
22 v = v.Elem()
23 }
24
25 switch v.Kind() {
26 case reflect.Struct:
27 strtype := v.Type().String()
28 if strtype == "time.Time" {
29 fmt.Fprintf(buf, "%s", v.Interface())
30 break
31 } else if strings.HasPrefix(strtype, "io.") {
32 buf.WriteString("<buffer>")
33 break
34 }
35
36 buf.WriteString("{\n")
37
38 names := []string{}
39 for i := 0; i < v.Type().NumField(); i++ {
40 name := v.Type().Field(i).Name
41 f := v.Field(i)
42 if name[0:1] == strings.ToLower(name[0:1]) {
43 continue // ignore unexported fields
44 }
45 if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice || f.Kind() == reflect.Map) && f.IsNil() {
46 continue // ignore unset fields
47 }
48 names = append(names, name)
49 }
50
51 for i, n := range names {
52 val := v.FieldByName(n)
53 buf.WriteString(strings.Repeat(" ", indent+2))
54 buf.WriteString(n + ": ")
55 prettify(val, indent+2, buf)
56
57 if i < len(names)-1 {
58 buf.WriteString(",\n")
59 }
60 }
61
62 buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
63 case reflect.Slice:
64 strtype := v.Type().String()
65 if strtype == "[]uint8" {
66 fmt.Fprintf(buf, "<binary> len %d", v.Len())
67 break
68 }
69
70 nl, id, id2 := "", "", ""
71 if v.Len() > 3 {
72 nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2)
73 }
74 buf.WriteString("[" + nl)
75 for i := 0; i < v.Len(); i++ {
76 buf.WriteString(id2)
77 prettify(v.Index(i), indent+2, buf)
78
79 if i < v.Len()-1 {
80 buf.WriteString("," + nl)
81 }
82 }
83
84 buf.WriteString(nl + id + "]")
85 case reflect.Map:
86 buf.WriteString("{\n")
87
88 for i, k := range v.MapKeys() {
89 buf.WriteString(strings.Repeat(" ", indent+2))
90 buf.WriteString(k.String() + ": ")
91 prettify(v.MapIndex(k), indent+2, buf)
92
93 if i < v.Len()-1 {
94 buf.WriteString(",\n")
95 }
96 }
97
98 buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
99 default:
100 if !v.IsValid() {
101 fmt.Fprint(buf, "<invalid value>")
102 return
103 }
104 format := "%v"
105 switch v.Interface().(type) {
106 case string:
107 format = "%q"
108 case io.ReadSeeker, io.Reader:
109 format = "buffer(%p)"
110 }
111 fmt.Fprintf(buf, format, v.Interface())
112 }
113}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go
new file mode 100644
index 0000000..b6432f1
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go
@@ -0,0 +1,89 @@
1package awsutil
2
3import (
4 "bytes"
5 "fmt"
6 "reflect"
7 "strings"
8)
9
10// StringValue returns the string representation of a value.
11func StringValue(i interface{}) string {
12 var buf bytes.Buffer
13 stringValue(reflect.ValueOf(i), 0, &buf)
14 return buf.String()
15}
16
17func stringValue(v reflect.Value, indent int, buf *bytes.Buffer) {
18 for v.Kind() == reflect.Ptr {
19 v = v.Elem()
20 }
21
22 switch v.Kind() {
23 case reflect.Struct:
24 buf.WriteString("{\n")
25
26 names := []string{}
27 for i := 0; i < v.Type().NumField(); i++ {
28 name := v.Type().Field(i).Name
29 f := v.Field(i)
30 if name[0:1] == strings.ToLower(name[0:1]) {
31 continue // ignore unexported fields
32 }
33 if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice) && f.IsNil() {
34 continue // ignore unset fields
35 }
36 names = append(names, name)
37 }
38
39 for i, n := range names {
40 val := v.FieldByName(n)
41 buf.WriteString(strings.Repeat(" ", indent+2))
42 buf.WriteString(n + ": ")
43 stringValue(val, indent+2, buf)
44
45 if i < len(names)-1 {
46 buf.WriteString(",\n")
47 }
48 }
49
50 buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
51 case reflect.Slice:
52 nl, id, id2 := "", "", ""
53 if v.Len() > 3 {
54 nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2)
55 }
56 buf.WriteString("[" + nl)
57 for i := 0; i < v.Len(); i++ {
58 buf.WriteString(id2)
59 stringValue(v.Index(i), indent+2, buf)
60
61 if i < v.Len()-1 {
62 buf.WriteString("," + nl)
63 }
64 }
65
66 buf.WriteString(nl + id + "]")
67 case reflect.Map:
68 buf.WriteString("{\n")
69
70 for i, k := range v.MapKeys() {
71 buf.WriteString(strings.Repeat(" ", indent+2))
72 buf.WriteString(k.String() + ": ")
73 stringValue(v.MapIndex(k), indent+2, buf)
74
75 if i < v.Len()-1 {
76 buf.WriteString(",\n")
77 }
78 }
79
80 buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
81 default:
82 format := "%v"
83 switch v.Interface().(type) {
84 case string:
85 format = "%q"
86 }
87 fmt.Fprintf(buf, format, v.Interface())
88 }
89}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/client.go b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go
new file mode 100644
index 0000000..48b0fbd
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go
@@ -0,0 +1,149 @@
1package client
2
3import (
4 "fmt"
5 "net/http/httputil"
6
7 "github.com/aws/aws-sdk-go/aws"
8 "github.com/aws/aws-sdk-go/aws/awserr"
9 "github.com/aws/aws-sdk-go/aws/client/metadata"
10 "github.com/aws/aws-sdk-go/aws/request"
11)
12
13// A Config provides configuration to a service client instance.
14type Config struct {
15 Config *aws.Config
16 Handlers request.Handlers
17 Endpoint string
18 SigningRegion string
19 SigningName string
20}
21
22// ConfigProvider provides a generic way for a service client to receive
23// the ClientConfig without circular dependencies.
24type ConfigProvider interface {
25 ClientConfig(serviceName string, cfgs ...*aws.Config) Config
26}
27
28// ConfigNoResolveEndpointProvider is the same as ConfigProvider, except it
29// will not resolve the endpoint automatically. The service client's endpoint
30// must be provided via the aws.Config.Endpoint field.
31type ConfigNoResolveEndpointProvider interface {
32 ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) Config
33}
34
35// A Client implements the base client request and response handling
36// used by all service clients.
37type Client struct {
38 request.Retryer
39 metadata.ClientInfo
40
41 Config aws.Config
42 Handlers request.Handlers
43}
44
45// New will return a pointer to a new initialized service client.
46func New(cfg aws.Config, info metadata.ClientInfo, handlers request.Handlers, options ...func(*Client)) *Client {
47 svc := &Client{
48 Config: cfg,
49 ClientInfo: info,
50 Handlers: handlers.Copy(),
51 }
52
53 switch retryer, ok := cfg.Retryer.(request.Retryer); {
54 case ok:
55 svc.Retryer = retryer
56 case cfg.Retryer != nil && cfg.Logger != nil:
57 s := fmt.Sprintf("WARNING: %T does not implement request.Retryer; using DefaultRetryer instead", cfg.Retryer)
58 cfg.Logger.Log(s)
59 fallthrough
60 default:
61 maxRetries := aws.IntValue(cfg.MaxRetries)
62 if cfg.MaxRetries == nil || maxRetries == aws.UseServiceDefaultRetries {
63 maxRetries = 3
64 }
65 svc.Retryer = DefaultRetryer{NumMaxRetries: maxRetries}
66 }
67
68 svc.AddDebugHandlers()
69
70 for _, option := range options {
71 option(svc)
72 }
73
74 return svc
75}
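
// A sketch of the retryer selection above (config values hypothetical; info
// and handlers are assumed to be a metadata.ClientInfo and request.Handlers
// already in scope). A cfg.Retryer that does not implement request.Retryer is
// discarded with the logged warning, and the client falls back to
// DefaultRetryer:
//
//	cfg := *aws.NewConfig().WithMaxRetries(5).WithLogger(aws.NewDefaultLogger())
//	cfg.Retryer = struct{}{} // does not implement request.Retryer
//	c := New(cfg, info, handlers)
//	// logs the WARNING above and uses DefaultRetryer{NumMaxRetries: 5}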
76
77// NewRequest returns a new Request pointer for the service API
78// operation and parameters.
79func (c *Client) NewRequest(operation *request.Operation, params interface{}, data interface{}) *request.Request {
80 return request.New(c.Config, c.ClientInfo, c.Handlers, c.Retryer, operation, params, data)
81}
82
83// AddDebugHandlers injects debug logging handlers into the service to log request
84// debug information.
85func (c *Client) AddDebugHandlers() {
86 if !c.Config.LogLevel.AtLeast(aws.LogDebug) {
87 return
88 }
89
90 c.Handlers.Send.PushFrontNamed(request.NamedHandler{Name: "awssdk.client.LogRequest", Fn: logRequest})
91 c.Handlers.Send.PushBackNamed(request.NamedHandler{Name: "awssdk.client.LogResponse", Fn: logResponse})
92}
93
94const logReqMsg = `DEBUG: Request %s/%s Details:
95---[ REQUEST POST-SIGN ]-----------------------------
96%s
97-----------------------------------------------------`
98
99const logReqErrMsg = `DEBUG ERROR: Request %s/%s:
100---[ REQUEST DUMP ERROR ]-----------------------------
101%s
102-----------------------------------------------------`
103
104func logRequest(r *request.Request) {
105 logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody)
106 dumpedBody, err := httputil.DumpRequestOut(r.HTTPRequest, logBody)
107 if err != nil {
108 r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, r.ClientInfo.ServiceName, r.Operation.Name, err))
109 r.Error = awserr.New(request.ErrCodeRead, "an error occurred during request body reading", err)
110 return
111 }
112
113 if logBody {
114		// Reset the request body because DumpRequestOut re-wraps r.HTTPRequest's
115		// Body as a NoOpCloser, and the body would otherwise not be reset after
116		// being read by the HTTP client reader.
117 r.ResetBody()
118 }
119
120 r.Config.Logger.Log(fmt.Sprintf(logReqMsg, r.ClientInfo.ServiceName, r.Operation.Name, string(dumpedBody)))
121}
122
123const logRespMsg = `DEBUG: Response %s/%s Details:
124---[ RESPONSE ]--------------------------------------
125%s
126-----------------------------------------------------`
127
128const logRespErrMsg = `DEBUG ERROR: Response %s/%s:
129---[ RESPONSE DUMP ERROR ]-----------------------------
130%s
131-----------------------------------------------------`
132
133func logResponse(r *request.Request) {
134 var msg = "no response data"
135 if r.HTTPResponse != nil {
136 logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody)
137 dumpedBody, err := httputil.DumpResponse(r.HTTPResponse, logBody)
138 if err != nil {
139 r.Config.Logger.Log(fmt.Sprintf(logRespErrMsg, r.ClientInfo.ServiceName, r.Operation.Name, err))
140 r.Error = awserr.New(request.ErrCodeRead, "an error occurred during response body reading", err)
141 return
142 }
143
144 msg = string(dumpedBody)
145 } else if r.Error != nil {
146 msg = r.Error.Error()
147 }
148 r.Config.Logger.Log(fmt.Sprintf(logRespMsg, r.ClientInfo.ServiceName, r.Operation.Name, msg))
149}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go
new file mode 100644
index 0000000..1313478
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go
@@ -0,0 +1,96 @@
1package client
2
3import (
4 "math/rand"
5 "sync"
6 "time"
7
8 "github.com/aws/aws-sdk-go/aws/request"
9)
10
11// DefaultRetryer implements basic retry logic using exponential backoff for
12// most services. If you want to implement custom retry logic, implement the
13// request.Retryer interface or create a structure type that composes this
14// struct and overrides the specific methods. For example, to override only
15// the MaxRetries method:
16//
17// type retryer struct {
18//	client.DefaultRetryer
19// }
20//
21// // This implementation always has 100 max retries
22// func (d retryer) MaxRetries() int { return 100 }
23type DefaultRetryer struct {
24 NumMaxRetries int
25}
26
27// MaxRetries returns the maximum number of retries the service will use when
28// making an individual API request.
29func (d DefaultRetryer) MaxRetries() int {
30 return d.NumMaxRetries
31}
32
33var seededRand = rand.New(&lockedSource{src: rand.NewSource(time.Now().UnixNano())})
34
35// RetryRules returns the delay duration before retrying this request again
36func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration {
37 // Set the upper limit of delay in retrying at ~five minutes
38 minTime := 30
39 throttle := d.shouldThrottle(r)
40 if throttle {
41 minTime = 500
42 }
43
44 retryCount := r.RetryCount
45 if retryCount > 13 {
46 retryCount = 13
47 } else if throttle && retryCount > 8 {
48 retryCount = 8
49 }
50
51 delay := (1 << uint(retryCount)) * (seededRand.Intn(minTime) + minTime)
52 return time.Duration(delay) * time.Millisecond
53}
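
// A worked example of the backoff above: with retryCount == 2 and no
// throttling, minTime is 30, so the delay is (1 << 2) * (seededRand.Intn(30)
// + 30) milliseconds, i.e. somewhere in 120ms-236ms. The retryCount caps of
// 13 (and 8 when throttled) bound the multiplier so delays stay near the
// intended ceiling of roughly five minutes.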
54
55// ShouldRetry returns true if the request should be retried.
56func (d DefaultRetryer) ShouldRetry(r *request.Request) bool {
57 // If one of the other handlers already set the retry state
58 // we don't want to override it based on the service's state
59 if r.Retryable != nil {
60 return *r.Retryable
61 }
62
63 if r.HTTPResponse.StatusCode >= 500 {
64 return true
65 }
66 return r.IsErrorRetryable() || d.shouldThrottle(r)
67}
68
69// shouldThrottle returns true if the request should be throttled.
70func (d DefaultRetryer) shouldThrottle(r *request.Request) bool {
71 if r.HTTPResponse.StatusCode == 502 ||
72 r.HTTPResponse.StatusCode == 503 ||
73 r.HTTPResponse.StatusCode == 504 {
74 return true
75 }
76 return r.IsErrorThrottle()
77}
78
79// lockedSource is a thread-safe implementation of rand.Source
80type lockedSource struct {
81 lk sync.Mutex
82 src rand.Source
83}
84
85func (r *lockedSource) Int63() (n int64) {
86 r.lk.Lock()
87 n = r.src.Int63()
88 r.lk.Unlock()
89 return
90}
91
92func (r *lockedSource) Seed(seed int64) {
93 r.lk.Lock()
94 r.src.Seed(seed)
95 r.lk.Unlock()
96}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go b/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go
new file mode 100644
index 0000000..4778056
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go
@@ -0,0 +1,12 @@
1package metadata
2
3// ClientInfo wraps immutable data from the client.Client structure.
4type ClientInfo struct {
5 ServiceName string
6 APIVersion string
7 Endpoint string
8 SigningName string
9 SigningRegion string
10 JSONVersion string
11 TargetPrefix string
12}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/config.go b/vendor/github.com/aws/aws-sdk-go/aws/config.go
new file mode 100644
index 0000000..d1f31f1
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/config.go
@@ -0,0 +1,470 @@
1package aws
2
3import (
4 "net/http"
5 "time"
6
7 "github.com/aws/aws-sdk-go/aws/credentials"
8 "github.com/aws/aws-sdk-go/aws/endpoints"
9)
10
11// UseServiceDefaultRetries instructs the config to use the service's own
12// default number of retries. This is also the default action if
13// Config.MaxRetries is nil.
14const UseServiceDefaultRetries = -1
15
16// RequestRetryer is an alias for a type that implements the request.Retryer
17// interface.
18type RequestRetryer interface{}
19
20// A Config provides service configuration for service clients. By default,
21// all clients will use the defaults.DefaultConfig structure.
22//
23// // Create Session with MaxRetry configuration to be shared by multiple
24// // service clients.
25// sess := session.Must(session.NewSession(&aws.Config{
26// MaxRetries: aws.Int(3),
27// }))
28//
29// // Create S3 service client with a specific Region.
30// svc := s3.New(sess, &aws.Config{
31// Region: aws.String("us-west-2"),
32// })
33type Config struct {
34 // Enables verbose error printing of all credential chain errors.
35 // Should be used when wanting to see all errors while attempting to
36 // retrieve credentials.
37 CredentialsChainVerboseErrors *bool
38
39 // The credentials object to use when signing requests. Defaults to a
40 // chain of credential providers to search for credentials in environment
41 // variables, shared credential file, and EC2 Instance Roles.
42 Credentials *credentials.Credentials
43
44 // An optional endpoint URL (hostname only or fully qualified URI)
45 // that overrides the default generated endpoint for a client. Set this
46 // to `""` to use the default generated endpoint.
47 //
48 // @note You must still provide a `Region` value when specifying an
49 // endpoint for a client.
50 Endpoint *string
51
52 // The resolver to use for looking up endpoints for AWS service clients
53 // to use based on region.
54 EndpointResolver endpoints.Resolver
55
56 // EnforceShouldRetryCheck is used in the AfterRetryHandler to always call
57	// ShouldRetry regardless of whether request.Retryable is set.
58 // This will utilize ShouldRetry method of custom retryers. If EnforceShouldRetryCheck
59 // is not set, then ShouldRetry will only be called if request.Retryable is nil.
60 // Proper handling of the request.Retryable field is important when setting this field.
61 EnforceShouldRetryCheck *bool
62
63 // The region to send requests to. This parameter is required and must
64 // be configured globally or on a per-client basis unless otherwise
65 // noted. A full list of regions is found in the "Regions and Endpoints"
66 // document.
67 //
68 // @see http://docs.aws.amazon.com/general/latest/gr/rande.html
69 // AWS Regions and Endpoints
70 Region *string
71
72 // Set this to `true` to disable SSL when sending requests. Defaults
73 // to `false`.
74 DisableSSL *bool
75
76 // The HTTP client to use when sending requests. Defaults to
77 // `http.DefaultClient`.
78 HTTPClient *http.Client
79
80 // An integer value representing the logging level. The default log level
81 // is zero (LogOff), which represents no logging. To enable logging set
82 // to a LogLevel Value.
83 LogLevel *LogLevelType
84
85 // The logger writer interface to write logging messages to. Defaults to
86 // standard out.
87 Logger Logger
88
89 // The maximum number of times that a request will be retried for failures.
90 // Defaults to -1, which defers the max retry setting to the service
91 // specific configuration.
92 MaxRetries *int
93
94 // Retryer guides how HTTP requests should be retried in case of
95 // recoverable failures.
96 //
97 // When nil or the value does not implement the request.Retryer interface,
98 // the request.DefaultRetryer will be used.
99 //
100 // When both Retryer and MaxRetries are non-nil, the former is used and
101 // the latter ignored.
102 //
103 // To set the Retryer field in a type-safe manner and with chaining, use
104 // the request.WithRetryer helper function:
105 //
106 // cfg := request.WithRetryer(aws.NewConfig(), myRetryer)
107 //
108 Retryer RequestRetryer
109
110 // Disables semantic parameter validation, which validates input for
111 // missing required fields and/or other semantic request input errors.
112 DisableParamValidation *bool
113
114 // Disables the computation of request and response checksums, e.g.,
115 // CRC32 checksums in Amazon DynamoDB.
116 DisableComputeChecksums *bool
117
118 // Set this to `true` to force the request to use path-style addressing,
119 // i.e., `http://s3.amazonaws.com/BUCKET/KEY`. By default, the S3 client
120 // will use virtual hosted bucket addressing when possible
121 // (`http://BUCKET.s3.amazonaws.com/KEY`).
122 //
123 // @note This configuration option is specific to the Amazon S3 service.
124 // @see http://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html
125 // Amazon S3: Virtual Hosting of Buckets
126 S3ForcePathStyle *bool
127
128 // Set this to `true` to disable the SDK adding the `Expect: 100-Continue`
129 // header to PUT requests over 2MB of content. 100-Continue instructs the
130 // HTTP client not to send the body until the service responds with a
131 // `continue` status. This is useful to prevent sending the request body
132	// until after the request is authenticated and validated.
133 //
134 // http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html
135 //
136 // 100-Continue is only enabled for Go 1.6 and above. See `http.Transport`'s
137 // `ExpectContinueTimeout` for information on adjusting the continue wait
138 // timeout. https://golang.org/pkg/net/http/#Transport
139 //
140	// You should use this flag to disable 100-Continue if you experience issues
141 // with proxies or third party S3 compatible services.
142 S3Disable100Continue *bool
143
144	// Set this to `true` to enable the S3 Accelerate feature. All operations
145	// compatible with S3 Accelerate will use the accelerate endpoint for
146	// requests. Requests not compatible will fall back to normal S3 requests.
147	//
148	// The bucket must be enabled for accelerate before it can be used with an
149	// S3 client that has accelerate enabled. If the bucket is not enabled for
150	// accelerate an error will be returned. The bucket name must also be DNS
151	// compatible to work with accelerate.
152 S3UseAccelerate *bool
153
154 // Set this to `true` to disable the EC2Metadata client from overriding the
155 // default http.Client's Timeout. This is helpful if you do not want the
156	// EC2Metadata client to create a new http.Client. This option is only
157 // meaningful if you're not already using a custom HTTP client with the
158 // SDK. Enabled by default.
159 //
160 // Must be set and provided to the session.NewSession() in order to disable
161 // the EC2Metadata overriding the timeout for default credentials chain.
162 //
163 // Example:
164 // sess := session.Must(session.NewSession(aws.NewConfig()
165	//            .WithEC2MetadataDisableTimeoutOverride(true)))
166 //
167 // svc := s3.New(sess)
168 //
169 EC2MetadataDisableTimeoutOverride *bool
170
171	// Instructs the endpoint generated for a service client to be the
172	// dual stack endpoint. The dual stack endpoint will support both
173	// IPv4 and IPv6 addressing.
174	//
175	// Setting this for a service which does not support dual stack will
176	// cause requests to fail. It is not recommended to set this value on
177	// the session, as it will apply to all service clients created with
178	// the session, even services which don't support dual stack endpoints.
179	//
180	// If the Endpoint config value is also provided the UseDualStack flag
181	// will be ignored.
182	//
183	// Example:
184 //
185 // sess := session.Must(session.NewSession())
186 //
187 // svc := s3.New(sess, &aws.Config{
188 // UseDualStack: aws.Bool(true),
189 // })
190 UseDualStack *bool
191
192 // SleepDelay is an override for the func the SDK will call when sleeping
193 // during the lifecycle of a request. Specifically this will be used for
194 // request delays. This value should only be used for testing. To adjust
195 // the delay of a request see the aws/client.DefaultRetryer and
196 // aws/request.Retryer.
197 //
198 // SleepDelay will prevent any Context from being used for canceling retry
199 // delay of an API operation. It is recommended to not use SleepDelay at all
200 // and specify a Retryer instead.
201 SleepDelay func(time.Duration)
202
203	// DisableRestProtocolURICleaning, when set to true, prevents the URL path from being cleaned when making REST protocol requests.
204	// Defaults to false. This is only needed for S3 requests whose object keys contain empty directory names.
205 //
206 // Example:
207 // sess := session.Must(session.NewSession(&aws.Config{
208 // DisableRestProtocolURICleaning: aws.Bool(true),
209 // }))
210 //
211 // svc := s3.New(sess)
212 // out, err := svc.GetObject(&s3.GetObjectInput {
213 // Bucket: aws.String("bucketname"),
214 // Key: aws.String("//foo//bar//moo"),
215 // })
216 DisableRestProtocolURICleaning *bool
217}
218
219// NewConfig returns a new Config pointer that can be chained with builder
220// methods to set multiple configuration values inline without using pointers.
221//
222// // Create Session with MaxRetry configuration to be shared by multiple
223// // service clients.
224// sess := session.Must(session.NewSession(aws.NewConfig().
225// WithMaxRetries(3),
226// ))
227//
228// // Create S3 service client with a specific Region.
229// svc := s3.New(sess, aws.NewConfig().
230// WithRegion("us-west-2"),
231// )
232func NewConfig() *Config {
233 return &Config{}
234}
235
236// WithCredentialsChainVerboseErrors sets a config CredentialsChainVerboseErrors
237// value returning a Config pointer for chaining.
238func (c *Config) WithCredentialsChainVerboseErrors(verboseErrs bool) *Config {
239 c.CredentialsChainVerboseErrors = &verboseErrs
240 return c
241}
242
243// WithCredentials sets a config Credentials value returning a Config pointer
244// for chaining.
245func (c *Config) WithCredentials(creds *credentials.Credentials) *Config {
246 c.Credentials = creds
247 return c
248}
249
250// WithEndpoint sets a config Endpoint value returning a Config pointer for
251// chaining.
252func (c *Config) WithEndpoint(endpoint string) *Config {
253 c.Endpoint = &endpoint
254 return c
255}
256
257// WithEndpointResolver sets a config EndpointResolver value returning a
258// Config pointer for chaining.
259func (c *Config) WithEndpointResolver(resolver endpoints.Resolver) *Config {
260 c.EndpointResolver = resolver
261 return c
262}
263
264// WithRegion sets a config Region value returning a Config pointer for
265// chaining.
266func (c *Config) WithRegion(region string) *Config {
267 c.Region = &region
268 return c
269}
270
271// WithDisableSSL sets a config DisableSSL value returning a Config pointer
272// for chaining.
273func (c *Config) WithDisableSSL(disable bool) *Config {
274 c.DisableSSL = &disable
275 return c
276}
277
278// WithHTTPClient sets a config HTTPClient value returning a Config pointer
279// for chaining.
280func (c *Config) WithHTTPClient(client *http.Client) *Config {
281 c.HTTPClient = client
282 return c
283}
284
285// WithMaxRetries sets a config MaxRetries value returning a Config pointer
286// for chaining.
287func (c *Config) WithMaxRetries(max int) *Config {
288 c.MaxRetries = &max
289 return c
290}
291
292// WithDisableParamValidation sets a config DisableParamValidation value
293// returning a Config pointer for chaining.
294func (c *Config) WithDisableParamValidation(disable bool) *Config {
295 c.DisableParamValidation = &disable
296 return c
297}
298
299// WithDisableComputeChecksums sets a config DisableComputeChecksums value
300// returning a Config pointer for chaining.
301func (c *Config) WithDisableComputeChecksums(disable bool) *Config {
302 c.DisableComputeChecksums = &disable
303 return c
304}
305
306// WithLogLevel sets a config LogLevel value returning a Config pointer for
307// chaining.
308func (c *Config) WithLogLevel(level LogLevelType) *Config {
309 c.LogLevel = &level
310 return c
311}
312
313// WithLogger sets a config Logger value returning a Config pointer for
314// chaining.
315func (c *Config) WithLogger(logger Logger) *Config {
316 c.Logger = logger
317 return c
318}
319
320// WithS3ForcePathStyle sets a config S3ForcePathStyle value returning a Config
321// pointer for chaining.
322func (c *Config) WithS3ForcePathStyle(force bool) *Config {
323 c.S3ForcePathStyle = &force
324 return c
325}
326
327// WithS3Disable100Continue sets a config S3Disable100Continue value returning
328// a Config pointer for chaining.
329func (c *Config) WithS3Disable100Continue(disable bool) *Config {
330 c.S3Disable100Continue = &disable
331 return c
332}
333
334// WithS3UseAccelerate sets a config S3UseAccelerate value returning a Config
335// pointer for chaining.
336func (c *Config) WithS3UseAccelerate(enable bool) *Config {
337 c.S3UseAccelerate = &enable
338 return c
339}
340
341// WithUseDualStack sets a config UseDualStack value returning a Config
342// pointer for chaining.
343func (c *Config) WithUseDualStack(enable bool) *Config {
344 c.UseDualStack = &enable
345 return c
346}
347
348// WithEC2MetadataDisableTimeoutOverride sets a config EC2MetadataDisableTimeoutOverride value
349// returning a Config pointer for chaining.
350func (c *Config) WithEC2MetadataDisableTimeoutOverride(enable bool) *Config {
351 c.EC2MetadataDisableTimeoutOverride = &enable
352 return c
353}
354
355// WithSleepDelay overrides the function used to sleep while waiting for the
356// next retry. Defaults to time.Sleep.
357func (c *Config) WithSleepDelay(fn func(time.Duration)) *Config {
358 c.SleepDelay = fn
359 return c
360}
361
362// MergeIn merges the passed in configs into the existing config object.
363func (c *Config) MergeIn(cfgs ...*Config) {
364 for _, other := range cfgs {
365 mergeInConfig(c, other)
366 }
367}
368
369func mergeInConfig(dst *Config, other *Config) {
370 if other == nil {
371 return
372 }
373
374 if other.CredentialsChainVerboseErrors != nil {
375 dst.CredentialsChainVerboseErrors = other.CredentialsChainVerboseErrors
376 }
377
378 if other.Credentials != nil {
379 dst.Credentials = other.Credentials
380 }
381
382 if other.Endpoint != nil {
383 dst.Endpoint = other.Endpoint
384 }
385
386 if other.EndpointResolver != nil {
387 dst.EndpointResolver = other.EndpointResolver
388 }
389
390 if other.Region != nil {
391 dst.Region = other.Region
392 }
393
394 if other.DisableSSL != nil {
395 dst.DisableSSL = other.DisableSSL
396 }
397
398 if other.HTTPClient != nil {
399 dst.HTTPClient = other.HTTPClient
400 }
401
402 if other.LogLevel != nil {
403 dst.LogLevel = other.LogLevel
404 }
405
406 if other.Logger != nil {
407 dst.Logger = other.Logger
408 }
409
410 if other.MaxRetries != nil {
411 dst.MaxRetries = other.MaxRetries
412 }
413
414 if other.Retryer != nil {
415 dst.Retryer = other.Retryer
416 }
417
418 if other.DisableParamValidation != nil {
419 dst.DisableParamValidation = other.DisableParamValidation
420 }
421
422 if other.DisableComputeChecksums != nil {
423 dst.DisableComputeChecksums = other.DisableComputeChecksums
424 }
425
426 if other.S3ForcePathStyle != nil {
427 dst.S3ForcePathStyle = other.S3ForcePathStyle
428 }
429
430 if other.S3Disable100Continue != nil {
431 dst.S3Disable100Continue = other.S3Disable100Continue
432 }
433
434 if other.S3UseAccelerate != nil {
435 dst.S3UseAccelerate = other.S3UseAccelerate
436 }
437
438 if other.UseDualStack != nil {
439 dst.UseDualStack = other.UseDualStack
440 }
441
442 if other.EC2MetadataDisableTimeoutOverride != nil {
443 dst.EC2MetadataDisableTimeoutOverride = other.EC2MetadataDisableTimeoutOverride
444 }
445
446 if other.SleepDelay != nil {
447 dst.SleepDelay = other.SleepDelay
448 }
449
450 if other.DisableRestProtocolURICleaning != nil {
451 dst.DisableRestProtocolURICleaning = other.DisableRestProtocolURICleaning
452 }
453
454 if other.EnforceShouldRetryCheck != nil {
455 dst.EnforceShouldRetryCheck = other.EnforceShouldRetryCheck
456 }
457}
458
459// Copy will return a shallow copy of the Config object. If any additional
460// configurations are provided they will be merged into the new config returned.
461func (c *Config) Copy(cfgs ...*Config) *Config {
462 dst := &Config{}
463 dst.MergeIn(c)
464
465 for _, cfg := range cfgs {
466 dst.MergeIn(cfg)
467 }
468
469 return dst
470}
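
// A sketch of the merge semantics above (values hypothetical): only non-nil
// fields of later configs overwrite earlier ones, and Copy leaves the
// receiver untouched.
//
//	base := aws.NewConfig().WithRegion("us-west-2").WithMaxRetries(3)
//	merged := base.Copy(aws.NewConfig().WithMaxRetries(10))
//	// *merged.Region == "us-west-2", *merged.MaxRetries == 10
//	// *base.MaxRetries is still 3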
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context.go b/vendor/github.com/aws/aws-sdk-go/aws/context.go
new file mode 100644
index 0000000..79f4268
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/context.go
@@ -0,0 +1,71 @@
1package aws
2
3import (
4 "time"
5)
6
7// Context is a copy of the Go v1.7 stdlib's context.Context interface.
8// It is represented as an SDK interface to enable you to use the "WithContext"
9// API methods with Go v1.6 and a Context type such as golang.org/x/net/context.
10//
11// See https://golang.org/pkg/context on how to use contexts.
12type Context interface {
13 // Deadline returns the time when work done on behalf of this context
14 // should be canceled. Deadline returns ok==false when no deadline is
15 // set. Successive calls to Deadline return the same results.
16 Deadline() (deadline time.Time, ok bool)
17
18 // Done returns a channel that's closed when work done on behalf of this
19 // context should be canceled. Done may return nil if this context can
20 // never be canceled. Successive calls to Done return the same value.
21 Done() <-chan struct{}
22
23 // Err returns a non-nil error value after Done is closed. Err returns
24 // Canceled if the context was canceled or DeadlineExceeded if the
25 // context's deadline passed. No other values for Err are defined.
26 // After Done is closed, successive calls to Err return the same value.
27 Err() error
28
29 // Value returns the value associated with this context for key, or nil
30 // if no value is associated with key. Successive calls to Value with
31 // the same key returns the same result.
32 //
33 // Use context values only for request-scoped data that transits
34 // processes and API boundaries, not for passing optional parameters to
35 // functions.
36 Value(key interface{}) interface{}
37}
38
39// BackgroundContext returns a context that will never be canceled, has no
40// values, and no deadline. This context is used by the SDK to provide
41// backwards compatibility with non-context API operations and functionality.
42//
43// Go 1.6 and before:
44// This context function is equivalent to context.Background in the Go stdlib.
45//
46// Go 1.7 and later:
47// The context returned will be the value returned by context.Background()
48//
49// See https://golang.org/pkg/context for more information on Contexts.
50func BackgroundContext() Context {
51 return backgroundCtx
52}
53
54// SleepWithContext will wait until the timer duration expires or the context
55// is canceled, whichever happens first. If the context is canceled, the
56// context's error is returned.
57//
58// Expects Context to always return a non-nil error if the Done channel is closed.
59func SleepWithContext(ctx Context, dur time.Duration) error {
60 t := time.NewTimer(dur)
61 defer t.Stop()
62
63 select {
64 case <-t.C:
65 break
66 case <-ctx.Done():
67 return ctx.Err()
68 }
69
70 return nil
71}
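
// A minimal usage sketch, assuming Go 1.7+ where the standard library's
// context.Context satisfies this package's Context interface:
//
//	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
//	defer cancel()
//	err := aws.SleepWithContext(ctx, time.Second)
//	// err == context.DeadlineExceeded: the context's Done channel won the
//	// select above before the one-second timer fired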
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_1_6.go b/vendor/github.com/aws/aws-sdk-go/aws/context_1_6.go
new file mode 100644
index 0000000..e8cf93d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/context_1_6.go
@@ -0,0 +1,41 @@
1// +build !go1.7
2
3package aws
4
5import "time"
6
7// An emptyCtx is a copy of the Go 1.7 context.emptyCtx type. This
8// is copied to provide a 1.6 and 1.5 safe version of context that is compatible
9// with Go 1.7's Context.
10//
11// An emptyCtx is never canceled, has no values, and has no deadline. It is not
12// struct{}, since vars of this type must have distinct addresses.
13type emptyCtx int
14
15func (*emptyCtx) Deadline() (deadline time.Time, ok bool) {
16 return
17}
18
19func (*emptyCtx) Done() <-chan struct{} {
20 return nil
21}
22
23func (*emptyCtx) Err() error {
24 return nil
25}
26
27func (*emptyCtx) Value(key interface{}) interface{} {
28 return nil
29}
30
31func (e *emptyCtx) String() string {
32 switch e {
33 case backgroundCtx:
34 return "aws.BackgroundContext"
35 }
36 return "unknown empty Context"
37}
38
39var (
40 backgroundCtx = new(emptyCtx)
41)
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_1_7.go b/vendor/github.com/aws/aws-sdk-go/aws/context_1_7.go
new file mode 100644
index 0000000..064f75c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/context_1_7.go
@@ -0,0 +1,9 @@
1// +build go1.7
2
3package aws
4
5import "context"
6
7var (
8 backgroundCtx = context.Background()
9)
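
The two files above are a standard build-tag split: mutually exclusive `// +build` constraints provide the same backgroundCtx variable on either side of the Go 1.7 boundary, so the rest of the package compiles unchanged. A generic sketch of the same technique (package and variable names are hypothetical):

    // file: native_old.go
    // +build !go1.7

    package mypkg

    // Compiled only on Go 1.6 and earlier.
    var hasNativeContext = false

    // file: native_new.go
    // +build go1.7

    package mypkg

    // Compiled only on Go 1.7 and later.
    var hasNativeContext = true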
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go b/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go
new file mode 100644
index 0000000..3b73a7d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go
@@ -0,0 +1,369 @@
1package aws
2
3import "time"
4
5// String returns a pointer to the string value passed in.
6func String(v string) *string {
7 return &v
8}
9
10// StringValue returns the value of the string pointer passed in or
11// "" if the pointer is nil.
12func StringValue(v *string) string {
13 if v != nil {
14 return *v
15 }
16 return ""
17}
18
19// StringSlice converts a slice of string values into a slice of
20// string pointers
21func StringSlice(src []string) []*string {
22 dst := make([]*string, len(src))
23 for i := 0; i < len(src); i++ {
24 dst[i] = &(src[i])
25 }
26 return dst
27}
28
29// StringValueSlice converts a slice of string pointers into a slice of
30// string values
31func StringValueSlice(src []*string) []string {
32 dst := make([]string, len(src))
33 for i := 0; i < len(src); i++ {
34 if src[i] != nil {
35 dst[i] = *(src[i])
36 }
37 }
38 return dst
39}
40
41// StringMap converts a string map of string values into a string
42// map of string pointers
43func StringMap(src map[string]string) map[string]*string {
44 dst := make(map[string]*string)
45 for k, val := range src {
46 v := val
47 dst[k] = &v
48 }
49 return dst
50}
51
52// StringValueMap converts a string map of string pointers into a string
53// map of string values
54func StringValueMap(src map[string]*string) map[string]string {
55 dst := make(map[string]string)
56 for k, val := range src {
57 if val != nil {
58 dst[k] = *val
59 }
60 }
61 return dst
62}
63
64// Bool returns a pointer to the bool value passed in.
65func Bool(v bool) *bool {
66 return &v
67}
68
69// BoolValue returns the value of the bool pointer passed in or
70// false if the pointer is nil.
71func BoolValue(v *bool) bool {
72 if v != nil {
73 return *v
74 }
75 return false
76}
77
78// BoolSlice converts a slice of bool values into a slice of
79// bool pointers
80func BoolSlice(src []bool) []*bool {
81 dst := make([]*bool, len(src))
82 for i := 0; i < len(src); i++ {
83 dst[i] = &(src[i])
84 }
85 return dst
86}
87
88// BoolValueSlice converts a slice of bool pointers into a slice of
89// bool values
90func BoolValueSlice(src []*bool) []bool {
91 dst := make([]bool, len(src))
92 for i := 0; i < len(src); i++ {
93 if src[i] != nil {
94 dst[i] = *(src[i])
95 }
96 }
97 return dst
98}
99
100// BoolMap converts a string map of bool values into a string
101// map of bool pointers
102func BoolMap(src map[string]bool) map[string]*bool {
103 dst := make(map[string]*bool)
104 for k, val := range src {
105 v := val
106 dst[k] = &v
107 }
108 return dst
109}
110
111// BoolValueMap converts a string map of bool pointers into a string
112// map of bool values
113func BoolValueMap(src map[string]*bool) map[string]bool {
114 dst := make(map[string]bool)
115 for k, val := range src {
116 if val != nil {
117 dst[k] = *val
118 }
119 }
120 return dst
121}
122
123// Int returns a pointer to the int value passed in.
124func Int(v int) *int {
125 return &v
126}
127
128// IntValue returns the value of the int pointer passed in or
129// 0 if the pointer is nil.
130func IntValue(v *int) int {
131 if v != nil {
132 return *v
133 }
134 return 0
135}
136
137// IntSlice converts a slice of int values into a slice of
138// int pointers
139func IntSlice(src []int) []*int {
140 dst := make([]*int, len(src))
141 for i := 0; i < len(src); i++ {
142 dst[i] = &(src[i])
143 }
144 return dst
145}
146
147// IntValueSlice converts a slice of int pointers into a slice of
148// int values
149func IntValueSlice(src []*int) []int {
150 dst := make([]int, len(src))
151 for i := 0; i < len(src); i++ {
152 if src[i] != nil {
153 dst[i] = *(src[i])
154 }
155 }
156 return dst
157}
158
159// IntMap converts a string map of int values into a string
160// map of int pointers
161func IntMap(src map[string]int) map[string]*int {
162 dst := make(map[string]*int)
163 for k, val := range src {
164 v := val
165 dst[k] = &v
166 }
167 return dst
168}
169
170// IntValueMap converts a string map of int pointers into a string
171// map of int values
172func IntValueMap(src map[string]*int) map[string]int {
173 dst := make(map[string]int)
174 for k, val := range src {
175 if val != nil {
176 dst[k] = *val
177 }
178 }
179 return dst
180}
181
182// Int64 returns a pointer to the int64 value passed in.
183func Int64(v int64) *int64 {
184 return &v
185}
186
187// Int64Value returns the value of the int64 pointer passed in or
188// 0 if the pointer is nil.
189func Int64Value(v *int64) int64 {
190 if v != nil {
191 return *v
192 }
193 return 0
194}
195
196// Int64Slice converts a slice of int64 values into a slice of
197// int64 pointers
198func Int64Slice(src []int64) []*int64 {
199 dst := make([]*int64, len(src))
200 for i := 0; i < len(src); i++ {
201 dst[i] = &(src[i])
202 }
203 return dst
204}
205
206// Int64ValueSlice converts a slice of int64 pointers into a slice of
207// int64 values
208func Int64ValueSlice(src []*int64) []int64 {
209 dst := make([]int64, len(src))
210 for i := 0; i < len(src); i++ {
211 if src[i] != nil {
212 dst[i] = *(src[i])
213 }
214 }
215 return dst
216}
217
218// Int64Map converts a string map of int64 values into a string
219// map of int64 pointers
220func Int64Map(src map[string]int64) map[string]*int64 {
221 dst := make(map[string]*int64)
222 for k, val := range src {
223 v := val
224 dst[k] = &v
225 }
226 return dst
227}
228
229// Int64ValueMap converts a string map of int64 pointers into a string
230// map of int64 values
231func Int64ValueMap(src map[string]*int64) map[string]int64 {
232 dst := make(map[string]int64)
233 for k, val := range src {
234 if val != nil {
235 dst[k] = *val
236 }
237 }
238 return dst
239}
240
241// Float64 returns a pointer to the float64 value passed in.
242func Float64(v float64) *float64 {
243 return &v
244}
245
246// Float64Value returns the value of the float64 pointer passed in or
247// 0 if the pointer is nil.
248func Float64Value(v *float64) float64 {
249 if v != nil {
250 return *v
251 }
252 return 0
253}
254
255// Float64Slice converts a slice of float64 values into a slice of
256// float64 pointers
257func Float64Slice(src []float64) []*float64 {
258 dst := make([]*float64, len(src))
259 for i := 0; i < len(src); i++ {
260 dst[i] = &(src[i])
261 }
262 return dst
263}
264
265// Float64ValueSlice converts a slice of float64 pointers into a slice of
266// float64 values
267func Float64ValueSlice(src []*float64) []float64 {
268 dst := make([]float64, len(src))
269 for i := 0; i < len(src); i++ {
270 if src[i] != nil {
271 dst[i] = *(src[i])
272 }
273 }
274 return dst
275}
276
277// Float64Map converts a string map of float64 values into a string
278// map of float64 pointers
279func Float64Map(src map[string]float64) map[string]*float64 {
280 dst := make(map[string]*float64)
281 for k, val := range src {
282 v := val
283 dst[k] = &v
284 }
285 return dst
286}
287
288// Float64ValueMap converts a string map of float64 pointers into a string
289// map of float64 values
290func Float64ValueMap(src map[string]*float64) map[string]float64 {
291 dst := make(map[string]float64)
292 for k, val := range src {
293 if val != nil {
294 dst[k] = *val
295 }
296 }
297 return dst
298}
299
300// Time returns a pointer to the time.Time value passed in.
301func Time(v time.Time) *time.Time {
302 return &v
303}
304
305// TimeValue returns the value of the time.Time pointer passed in or
306// time.Time{} if the pointer is nil.
307func TimeValue(v *time.Time) time.Time {
308 if v != nil {
309 return *v
310 }
311 return time.Time{}
312}
313
314// TimeUnixMilli returns a Unix timestamp in milliseconds from "January 1, 1970 UTC".
315// The result is undefined if the Unix time cannot be represented by an int64,
316// which includes calling TimeUnixMilli on a zero Time.
317//
318// This utility is useful for service APIs such as CloudWatch Logs which require
319// their unix time values to be in milliseconds.
320//
321// See Go stdlib https://golang.org/pkg/time/#Time.UnixNano for more information.
322func TimeUnixMilli(t time.Time) int64 {
323 return t.UnixNano() / int64(time.Millisecond/time.Nanosecond)
324}
325
326// TimeSlice converts a slice of time.Time values into a slice of
327// time.Time pointers
328func TimeSlice(src []time.Time) []*time.Time {
329 dst := make([]*time.Time, len(src))
330 for i := 0; i < len(src); i++ {
331 dst[i] = &(src[i])
332 }
333 return dst
334}
335
336// TimeValueSlice converts a slice of time.Time pointers into a slice of
337// time.Time values
338func TimeValueSlice(src []*time.Time) []time.Time {
339 dst := make([]time.Time, len(src))
340 for i := 0; i < len(src); i++ {
341 if src[i] != nil {
342 dst[i] = *(src[i])
343 }
344 }
345 return dst
346}
347
348// TimeMap converts a string map of time.Time values into a string
349// map of time.Time pointers
350func TimeMap(src map[string]time.Time) map[string]*time.Time {
351 dst := make(map[string]*time.Time)
352 for k, val := range src {
353 v := val
354 dst[k] = &v
355 }
356 return dst
357}
358
359// TimeValueMap converts a string map of time.Time pointers into a string
360// map of time.Time values
361func TimeValueMap(src map[string]*time.Time) map[string]time.Time {
362 dst := make(map[string]time.Time)
363 for k, val := range src {
364 if val != nil {
365 dst[k] = *val
366 }
367 }
368 return dst
369}
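
Since SDK API structs model optional fields as pointers, these helpers are used constantly in calling code: the scalar functions pointerize inputs, and the *Value functions safely dereference possibly-nil outputs. A short usage sketch:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
    )

    func main() {
        // Pointerize scalars for SDK input structs.
        name := aws.String("example-name")
        count := aws.Int64(10)

        // Safely dereference possibly-nil pointers from SDK outputs.
        var missing *string
        fmt.Println(aws.StringValue(name), aws.Int64Value(count)) // example-name 10
        fmt.Println(aws.StringValue(missing) == "")               // true

        // Slice and map variants convert in bulk.
        ptrs := aws.StringSlice([]string{"a", "b"})
        fmt.Println(aws.StringValueSlice(ptrs)) // [a b]
    }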
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go
new file mode 100644
index 0000000..25b461c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go
@@ -0,0 +1,226 @@
1package corehandlers
2
3import (
4 "bytes"
5 "fmt"
6 "io"
7 "io/ioutil"
8 "net/http"
9 "net/url"
10 "regexp"
11 "runtime"
12 "strconv"
13 "time"
14
15 "github.com/aws/aws-sdk-go/aws"
16 "github.com/aws/aws-sdk-go/aws/awserr"
17 "github.com/aws/aws-sdk-go/aws/credentials"
18 "github.com/aws/aws-sdk-go/aws/request"
19)
20
21// Interface for matching types which also have a Len method.
22type lener interface {
23 Len() int
24}
25
26// BuildContentLengthHandler builds the content length of a request based on the body,
27// or will use the HTTPRequest.Header's "Content-Length" if defined. If it is unable
28// to determine the request body length and no "Content-Length" was specified, it will panic.
29//
30// The Content-Length will only be added to the request if the length of the body
31// is greater than 0. If the body is empty or the current `Content-Length`
32// header is <= 0, the header will also be stripped.
33var BuildContentLengthHandler = request.NamedHandler{Name: "core.BuildContentLengthHandler", Fn: func(r *request.Request) {
34 var length int64
35
36 if slength := r.HTTPRequest.Header.Get("Content-Length"); slength != "" {
37 length, _ = strconv.ParseInt(slength, 10, 64)
38 } else {
39 switch body := r.Body.(type) {
40 case nil:
41 length = 0
42 case lener:
43 length = int64(body.Len())
44 case io.Seeker:
45 r.BodyStart, _ = body.Seek(0, 1)
46 end, _ := body.Seek(0, 2)
47 body.Seek(r.BodyStart, 0) // make sure to seek back to original location
48 length = end - r.BodyStart
49 default:
50 panic("Cannot get length of body, must provide `ContentLength`")
51 }
52 }
53
54 if length > 0 {
55 r.HTTPRequest.ContentLength = length
56 r.HTTPRequest.Header.Set("Content-Length", fmt.Sprintf("%d", length))
57 } else {
58 r.HTTPRequest.ContentLength = 0
59 r.HTTPRequest.Header.Del("Content-Length")
60 }
61}}
62
63// SDKVersionUserAgentHandler is a request handler for adding the SDK Version to the user agent.
64var SDKVersionUserAgentHandler = request.NamedHandler{
65 Name: "core.SDKVersionUserAgentHandler",
66 Fn: request.MakeAddToUserAgentHandler(aws.SDKName, aws.SDKVersion,
67 runtime.Version(), runtime.GOOS, runtime.GOARCH),
68}
69
70var reStatusCode = regexp.MustCompile(`^(\d{3})`)
71
72// ValidateReqSigHandler is a request handler to ensure that the request's
73// signature doesn't expire before it is sent. This can happen when a request
74// is built and signed significantly before it is sent, or when significant
75// delays occur while retrying requests, causing the signature to expire.
76var ValidateReqSigHandler = request.NamedHandler{
77 Name: "core.ValidateReqSigHandler",
78 Fn: func(r *request.Request) {
79 // Requests with anonymous credentials are never signed
80 if r.Config.Credentials == credentials.AnonymousCredentials {
81 return
82 }
83
84 signedTime := r.Time
85 if !r.LastSignedAt.IsZero() {
86 signedTime = r.LastSignedAt
87 }
88
89 // 10 minutes to allow for some clock skew/delays in transmission.
90 // Would be improved with aws/aws-sdk-go#423
91 if signedTime.Add(10 * time.Minute).After(time.Now()) {
92 return
93 }
94
95 fmt.Println("request expired, resigning")
96 r.Sign()
97 },
98}
99
100// SendHandler is a request handler to send a service request using the HTTP client.
101var SendHandler = request.NamedHandler{
102 Name: "core.SendHandler",
103 Fn: func(r *request.Request) {
104 sender := sendFollowRedirects
105 if r.DisableFollowRedirects {
106 sender = sendWithoutFollowRedirects
107 }
108
109 var err error
110 r.HTTPResponse, err = sender(r)
111 if err != nil {
112 handleSendError(r, err)
113 }
114 },
115}
116
117func sendFollowRedirects(r *request.Request) (*http.Response, error) {
118 return r.Config.HTTPClient.Do(r.HTTPRequest)
119}
120
121func sendWithoutFollowRedirects(r *request.Request) (*http.Response, error) {
122 transport := r.Config.HTTPClient.Transport
123 if transport == nil {
124 transport = http.DefaultTransport
125 }
126
127 return transport.RoundTrip(r.HTTPRequest)
128}
129
130func handleSendError(r *request.Request, err error) {
131 // Prevent leaking if an HTTPResponse was returned. Clean up
132 // the body.
133 if r.HTTPResponse != nil {
134 r.HTTPResponse.Body.Close()
135 }
136 // Capture the case where url.Error is returned for error processing
137 // response. e.g. 301 without location header comes back as string
138 // error and r.HTTPResponse is nil. Other URL redirect errors will
139 // come back in a similar manner.
140 if e, ok := err.(*url.Error); ok && e.Err != nil {
141 if s := reStatusCode.FindStringSubmatch(e.Err.Error()); s != nil {
142 code, _ := strconv.ParseInt(s[1], 10, 64)
143 r.HTTPResponse = &http.Response{
144 StatusCode: int(code),
145 Status: http.StatusText(int(code)),
146 Body: ioutil.NopCloser(bytes.NewReader([]byte{})),
147 }
148 return
149 }
150 }
151 if r.HTTPResponse == nil {
152 // Add a dummy request response object to ensure the HTTPResponse
153 // value is consistent.
154 r.HTTPResponse = &http.Response{
155 StatusCode: int(0),
156 Status: http.StatusText(int(0)),
157 Body: ioutil.NopCloser(bytes.NewReader([]byte{})),
158 }
159 }
160 // Catch all other request errors.
161 r.Error = awserr.New("RequestError", "send request failed", err)
162 r.Retryable = aws.Bool(true) // network errors are retryable
163
164 // Override the error with a context canceled error, if the context was canceled.
165 ctx := r.Context()
166 select {
167 case <-ctx.Done():
168 r.Error = awserr.New(request.CanceledErrorCode,
169 "request context canceled", ctx.Err())
170 r.Retryable = aws.Bool(false)
171 default:
172 }
173}
174
175// ValidateResponseHandler is a request handler to validate service response.
176var ValidateResponseHandler = request.NamedHandler{Name: "core.ValidateResponseHandler", Fn: func(r *request.Request) {
177 if r.HTTPResponse.StatusCode == 0 || r.HTTPResponse.StatusCode >= 300 {
178 // this may be replaced by an UnmarshalError handler
179 r.Error = awserr.New("UnknownError", "unknown error", nil)
180 }
181}}
182
183// AfterRetryHandler performs final checks to determine if the request should
184// be retried and how long to delay.
185var AfterRetryHandler = request.NamedHandler{Name: "core.AfterRetryHandler", Fn: func(r *request.Request) {
186 // If one of the other handlers already set the retry state
187 // we don't want to override it based on the service's state
188 if r.Retryable == nil || aws.BoolValue(r.Config.EnforceShouldRetryCheck) {
189 r.Retryable = aws.Bool(r.ShouldRetry(r))
190 }
191
192 if r.WillRetry() {
193 r.RetryDelay = r.RetryRules(r)
194
195 if sleepFn := r.Config.SleepDelay; sleepFn != nil {
196 // Support SleepDelay for backwards compatibility and testing
197 sleepFn(r.RetryDelay)
198 } else if err := aws.SleepWithContext(r.Context(), r.RetryDelay); err != nil {
199 r.Error = awserr.New(request.CanceledErrorCode,
200 "request context canceled", err)
201 r.Retryable = aws.Bool(false)
202 return
203 }
204
205 // when the expired token exception occurs the credentials
206 // need to be expired locally so that the next request to
207 // get credentials will trigger a credentials refresh.
208 if r.IsErrorExpired() {
209 r.Config.Credentials.Expire()
210 }
211
212 r.RetryCount++
213 r.Error = nil
214 }
215}}
216
217// ValidateEndpointHandler is a request handler to validate a request had the
218// appropriate Region and Endpoint set. Will set r.Error if the endpoint or
219// region is not valid.
220var ValidateEndpointHandler = request.NamedHandler{Name: "core.ValidateEndpointHandler", Fn: func(r *request.Request) {
221 if r.ClientInfo.SigningRegion == "" && aws.StringValue(r.Config.Region) == "" {
222 r.Error = aws.ErrMissingRegion
223 } else if r.ClientInfo.Endpoint == "" {
224 r.Error = aws.ErrMissingEndpoint
225 }
226}}
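
These core handlers are registered onto named handler lists that every request walks through in phases (Build, Sign, Send, and so on). Custom handlers hook in the same way; a minimal sketch, where the handler name and log line are illustrative:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws/request"
    )

    func main() {
        var handlers request.Handlers

        // Register a custom handler after the send phase, the same way
        // SendHandler and friends are registered by the SDK core.
        handlers.Send.PushBackNamed(request.NamedHandler{
            Name: "example.LogAfterSend",
            Fn: func(r *request.Request) {
                fmt.Println("sent request for operation:", r.Operation.Name)
            },
        })
    }

In practice the same PushBackNamed call is made against a service client's or session's Handlers value rather than a zero Handlers struct.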
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go
new file mode 100644
index 0000000..7d50b15
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go
@@ -0,0 +1,17 @@
1package corehandlers
2
3import "github.com/aws/aws-sdk-go/aws/request"
4
5// ValidateParametersHandler is a request handler to validate the input parameters.
6// Validating parameters only has meaning if done prior to the request being sent.
7var ValidateParametersHandler = request.NamedHandler{Name: "core.ValidateParametersHandler", Fn: func(r *request.Request) {
8 if !r.ParamsFilled() {
9 return
10 }
11
12 if v, ok := r.Params.(request.Validator); ok {
13 if err := v.Validate(); err != nil {
14 r.Error = err
15 }
16 }
17}}
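
The handler above only validates parameters that implement the request.Validator interface (a single Validate() error method). A sketch of a parameter type satisfying that contract; MyInput and its field are hypothetical:

    package main

    import (
        "github.com/aws/aws-sdk-go/aws/awserr"
    )

    // MyInput is a hypothetical parameter struct implementing the same
    // Validate() error contract that ValidateParametersHandler checks for.
    type MyInput struct {
        Name *string
    }

    // Validate satisfies the request.Validator interface.
    func (i *MyInput) Validate() error {
        if i.Name == nil {
            return awserr.New("InvalidParameter", "Name is required", nil)
        }
        return nil
    }

    func main() {
        _ = (&MyInput{}).Validate() // returns the InvalidParameter error
    }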
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go
new file mode 100644
index 0000000..f298d65
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go
@@ -0,0 +1,102 @@
1package credentials
2
3import (
4 "github.com/aws/aws-sdk-go/aws/awserr"
5)
6
7var (
8 // ErrNoValidProvidersFoundInChain is returned when there are no valid
9 // providers in the ChainProvider.
10 //
11 // This has been deprecated. For verbose error messaging set
12 // aws.Config.CredentialsChainVerboseErrors to true
13 //
14 // @readonly
15 ErrNoValidProvidersFoundInChain = awserr.New("NoCredentialProviders",
16 `no valid providers in chain. Deprecated.
17 For verbose messaging see aws.Config.CredentialsChainVerboseErrors`,
18 nil)
19)
20
21// A ChainProvider will search for a provider which returns credentials
22// and cache that provider until Retrieve is called again.
23//
24// The ChainProvider provides a way of chaining multiple providers together,
25// picking the first available provider in the priority order of the Providers
26// in the list.
27//
28// If none of the Providers retrieve valid credentials Value, ChainProvider's
29// Retrieve() will return the error ErrNoValidProvidersFoundInChain.
30//
31// If a Provider is found which returns valid credentials Value ChainProvider
32// will cache that Provider for all calls to IsExpired(), until Retrieve is
33// called again.
34//
35// Example of a ChainProvider to be used with an EnvProvider and EC2RoleProvider.
36// In this example EnvProvider will first check if any credentials are available
37// via the environment variables. If there are none, ChainProvider will check
38// the next Provider in the list, EC2RoleProvider in this case. If EC2RoleProvider
39// does not return any credentials, ChainProvider will return the error
40// ErrNoValidProvidersFoundInChain.
41//
42// creds := credentials.NewChainCredentials(
43// []credentials.Provider{
44// &credentials.EnvProvider{},
45// &ec2rolecreds.EC2RoleProvider{
46// Client: ec2metadata.New(sess),
47// },
48// })
49//
50// // Usage of ChainCredentials with aws.Config
51// svc := ec2.New(session.Must(session.NewSession(&aws.Config{
52// Credentials: creds,
53// })))
54//
55type ChainProvider struct {
56 Providers []Provider
57 curr Provider
58 VerboseErrors bool
59}
60
61// NewChainCredentials returns a pointer to a new Credentials object
62// wrapping a chain of providers.
63func NewChainCredentials(providers []Provider) *Credentials {
64 return NewCredentials(&ChainProvider{
65 Providers: append([]Provider{}, providers...),
66 })
67}
68
69// Retrieve returns the credentials value, or an error if no provider returned
70// without error.
71//
72// If a provider is found it will be cached and any calls to IsExpired()
73// will return the expired state of the cached provider.
74func (c *ChainProvider) Retrieve() (Value, error) {
75 var errs []error
76 for _, p := range c.Providers {
77 creds, err := p.Retrieve()
78 if err == nil {
79 c.curr = p
80 return creds, nil
81 }
82 errs = append(errs, err)
83 }
84 c.curr = nil
85
86 var err error
87 err = ErrNoValidProvidersFoundInChain
88 if c.VerboseErrors {
89 err = awserr.NewBatchError("NoCredentialProviders", "no valid providers in chain", errs)
90 }
91 return Value{}, err
92}
93
94// IsExpired will return the expired state of the currently cached provider
95// if there is one. If there is no current provider, true will be returned.
96func (c *ChainProvider) IsExpired() bool {
97 if c.curr != nil {
98 return c.curr.IsExpired()
99 }
100
101 return true
102}
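
NewChainCredentials does not expose the VerboseErrors field, so code that wants the batched per-provider errors constructs the ChainProvider directly. A minimal sketch:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws/credentials"
    )

    func main() {
        // Construct the ChainProvider directly to opt in to verbose errors;
        // NewChainCredentials does not expose the VerboseErrors field.
        creds := credentials.NewCredentials(&credentials.ChainProvider{
            VerboseErrors: true,
            Providers: []credentials.Provider{
                &credentials.EnvProvider{},
                &credentials.SharedCredentialsProvider{},
            },
        })

        if _, err := creds.Get(); err != nil {
            // With VerboseErrors set, this is a BatchError carrying each
            // provider's individual failure.
            fmt.Println(err)
        }
    }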
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go
new file mode 100644
index 0000000..42416fc
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go
@@ -0,0 +1,246 @@
1// Package credentials provides credential retrieval and management
2//
3// The Credentials type is the primary method of getting access to and managing
4// credential Values. Using dependency injection, retrieval of the credential
5// values is handled by an object which satisfies the Provider interface.
6//
7// By default Credentials.Get() will cache the successful result of a
8// Provider's Retrieve() until Provider.IsExpired() returns true, at which
9// point Credentials will call the Provider's Retrieve() to get a new credential Value.
10//
11// The Provider is responsible for determining when credentials Value have expired.
12// It is also important to note that Credentials will always call Retrieve the
13// first time Credentials.Get() is called.
14//
15// Example of using the environment variable credentials.
16//
17// creds := credentials.NewEnvCredentials()
18//
19// // Retrieve the credentials value
20// credValue, err := creds.Get()
21// if err != nil {
22// // handle error
23// }
24//
25// Example of forcing credentials to expire and be refreshed on the next Get().
26// This may be helpful to proactively expire credentials and refresh them sooner
27// than they would naturally expire on their own.
28//
29// creds := credentials.NewCredentials(&ec2rolecreds.EC2RoleProvider{})
30// creds.Expire()
31// credsValue, err := creds.Get()
32// // New credentials will be retrieved instead of from cache.
33//
34//
35// Custom Provider
36//
37// Each Provider built into this package also provides a helper method to generate
38// a Credentials pointer setup with the provider. To use a custom Provider just
39// create a type which satisfies the Provider interface and pass it to the
40// NewCredentials method.
41//
42// type MyProvider struct{}
43// func (m *MyProvider) Retrieve() (Value, error) {...}
44// func (m *MyProvider) IsExpired() bool {...}
45//
46// creds := credentials.NewCredentials(&MyProvider{})
47// credValue, err := creds.Get()
48//
49package credentials
50
51import (
52 "sync"
53 "time"
54)
55
56// AnonymousCredentials is an empty Credential object that can be used as
57// dummy placeholder credentials for requests that do not need to be signed.
58//
59// This Credentials can be used to configure a service to not sign requests
60// when making service API calls. For example, when accessing public
61// s3 buckets.
62//
63// svc := s3.New(session.Must(session.NewSession(&aws.Config{
64// Credentials: credentials.AnonymousCredentials,
65// })))
66// // Access public S3 buckets.
67//
68// @readonly
69var AnonymousCredentials = NewStaticCredentials("", "", "")
70
71// A Value is the AWS credentials value for individual credential fields.
72type Value struct {
73 // AWS Access key ID
74 AccessKeyID string
75
76 // AWS Secret Access Key
77 SecretAccessKey string
78
79 // AWS Session Token
80 SessionToken string
81
82 // Provider used to get credentials
83 ProviderName string
84}
85
86// A Provider is the interface for any component which will provide credentials
87// Value. A provider is required to manage its own Expired state, and what
88// being expired means.
89//
90// The Provider should not need to implement its own mutexes, because
91// that will be managed by Credentials.
92type Provider interface {
93 // Retrieve returns nil if it successfully retrieved the value.
94 // An error is returned if the value was not obtainable, or is empty.
95 Retrieve() (Value, error)
96
97 // IsExpired returns if the credentials are no longer valid, and need
98 // to be retrieved.
99 IsExpired() bool
100}
101
102// An ErrorProvider is a stub credentials provider that always returns an error.
103// It is used by the SDK when constructing a known provider is not possible
104// due to an error.
105type ErrorProvider struct {
106 // The error to be returned from Retrieve
107 Err error
108
109 // The provider name to set on the Value returned by Retrieve.
110 ProviderName string
111}
112
113// Retrieve will always return the error that the ErrorProvider was created with.
114func (p ErrorProvider) Retrieve() (Value, error) {
115 return Value{ProviderName: p.ProviderName}, p.Err
116}
117
118// IsExpired will always return not expired.
119func (p ErrorProvider) IsExpired() bool {
120 return false
121}
122
123// An Expiry provides shared expiration logic to be used by credentials
124// providers to implement expiry functionality.
125//
126// The best method to use this struct is as an anonymous field within the
127// provider's struct.
128//
129// Example:
130// type EC2RoleProvider struct {
131// Expiry
132// ...
133// }
134type Expiry struct {
135 // The date/time when to expire on
136 expiration time.Time
137
138 // If set will be used by IsExpired to determine the current time.
139 // Defaults to time.Now if CurrentTime is not set. Available for testing
140 // to be able to mock out the current time.
141 CurrentTime func() time.Time
142}
143
144// SetExpiration sets the expiration IsExpired will check when called.
145//
146// If window is greater than 0 the expiration time will be reduced by the
147// window value.
148//
149// Using a window is helpful to trigger credentials to expire sooner than
150// the expiration time given to ensure no requests are made with expired
151// tokens.
152func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) {
153 e.expiration = expiration
154 if window > 0 {
155 e.expiration = e.expiration.Add(-window)
156 }
157}
158
159// IsExpired returns if the credentials are expired.
160func (e *Expiry) IsExpired() bool {
161 if e.CurrentTime == nil {
162 e.CurrentTime = time.Now
163 }
164 return e.expiration.Before(e.CurrentTime())
165}
166
167// A Credentials provides synchronized, safe retrieval of AWS credentials Value.
168// Credentials will cache the credentials value until they expire. Once the value
169// expires the next Get will attempt to retrieve valid credentials.
170//
171// Credentials is safe to use across multiple goroutines and will manage the
172// synchronous state so the Providers do not need to implement their own
173// synchronization.
174//
175// The first Credentials.Get() will always call Provider.Retrieve() to get the
176// first instance of the credentials Value. All calls to Get() after that
177// will return the cached credentials Value until IsExpired() returns true.
178type Credentials struct {
179 creds Value
180 forceRefresh bool
181 m sync.Mutex
182
183 provider Provider
184}
185
186// NewCredentials returns a pointer to a new Credentials with the provider set.
187func NewCredentials(provider Provider) *Credentials {
188 return &Credentials{
189 provider: provider,
190 forceRefresh: true,
191 }
192}
193
194// Get returns the credentials value, or error if the credentials Value failed
195// to be retrieved.
196//
197// Will return the cached credentials Value if it has not expired. If the
198// credentials Value has expired the Provider's Retrieve() will be called
199// to refresh the credentials.
200//
201// If Credentials.Expire() was called the credentials Value will be force
202// expired, and the next call to Get() will cause them to be refreshed.
203func (c *Credentials) Get() (Value, error) {
204 c.m.Lock()
205 defer c.m.Unlock()
206
207 if c.isExpired() {
208 creds, err := c.provider.Retrieve()
209 if err != nil {
210 return Value{}, err
211 }
212 c.creds = creds
213 c.forceRefresh = false
214 }
215
216 return c.creds, nil
217}
218
219// Expire expires the credentials and forces them to be retrieved on the
220// next call to Get().
221//
222// This will override the Provider's expired state, and force Credentials
223// to call the Provider's Retrieve().
224func (c *Credentials) Expire() {
225 c.m.Lock()
226 defer c.m.Unlock()
227
228 c.forceRefresh = true
229}
230
231// IsExpired returns if the credentials are no longer valid, and need
232// to be retrieved.
233//
234// If the Credentials were forced to be expired with Expire() this will
235// reflect that override.
236func (c *Credentials) IsExpired() bool {
237 c.m.Lock()
238 defer c.m.Unlock()
239
240 return c.isExpired()
241}
242
243// isExpired helper method wrapping the definition of expired credentials.
244func (c *Credentials) isExpired() bool {
245 return c.forceRefresh || c.provider.IsExpired()
246}
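
The package comment's MyProvider stub, fleshed out into a runnable sketch that embeds Expiry so the Credentials cache refreshes it periodically. The type name, lifetime, and key material are all placeholders:

    package main

    import (
        "time"

        "github.com/aws/aws-sdk-go/aws/credentials"
    )

    // timedProvider is a hypothetical provider embedding Expiry, which
    // supplies the IsExpired implementation required by the interface.
    type timedProvider struct {
        credentials.Expiry
    }

    func (p *timedProvider) Retrieve() (credentials.Value, error) {
        // Expire 5 minutes early so nearly-expired credentials are not used.
        p.SetExpiration(time.Now().Add(1*time.Hour), 5*time.Minute)

        return credentials.Value{
            AccessKeyID:     "AKID",   // placeholder
            SecretAccessKey: "SECRET", // placeholder
            ProviderName:    "timedProvider",
        }, nil
    }

    func main() {
        creds := credentials.NewCredentials(&timedProvider{})
        creds.Get() // the first Get always calls Retrieve
    }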
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go
new file mode 100644
index 0000000..c397495
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go
@@ -0,0 +1,178 @@
1package ec2rolecreds
2
3import (
4 "bufio"
5 "encoding/json"
6 "fmt"
7 "path"
8 "strings"
9 "time"
10
11 "github.com/aws/aws-sdk-go/aws/awserr"
12 "github.com/aws/aws-sdk-go/aws/client"
13 "github.com/aws/aws-sdk-go/aws/credentials"
14 "github.com/aws/aws-sdk-go/aws/ec2metadata"
15)
16
17// ProviderName provides the name of the EC2Role provider
18const ProviderName = "EC2RoleProvider"
19
20// An EC2RoleProvider retrieves credentials from the EC2 service, and keeps track
21// of whether those credentials are expired.
22//
23// Example of how to configure the EC2RoleProvider with a custom http Client,
24// Endpoint, or ExpiryWindow:
25//
26// p := &ec2rolecreds.EC2RoleProvider{
27// // Pass in a custom timeout to be used when requesting
28// // IAM EC2 Role credentials.
29// Client: ec2metadata.New(sess, aws.Config{
30// HTTPClient: &http.Client{Timeout: 10 * time.Second},
31// }),
32//
33 // Do not use early expiry of credentials. If a non-zero value is
34 // specified the credentials will be expired early
35// ExpiryWindow: 0,
36// }
37type EC2RoleProvider struct {
38 credentials.Expiry
39
40 // Required EC2Metadata client to use when connecting to EC2 metadata service.
41 Client *ec2metadata.EC2Metadata
42
43 // ExpiryWindow will allow the credentials to trigger refreshing prior to
44 // the credentials actually expiring. This is beneficial so race conditions
45 // with expiring credentials do not cause requests to fail unexpectedly
46 // due to ExpiredTokenException exceptions.
47 //
48 // So an ExpiryWindow of 10s would cause calls to IsExpired() to return true
49 // 10 seconds before the credentials are actually expired.
50 //
51 // If ExpiryWindow is 0 or less it will be ignored.
52 ExpiryWindow time.Duration
53}
54
55// NewCredentials returns a pointer to a new Credentials object wrapping
56// the EC2RoleProvider. Takes a ConfigProvider to create a EC2Metadata client.
57// The ConfigProvider is satisfied by the session.Session type.
58func NewCredentials(c client.ConfigProvider, options ...func(*EC2RoleProvider)) *credentials.Credentials {
59 p := &EC2RoleProvider{
60 Client: ec2metadata.New(c),
61 }
62
63 for _, option := range options {
64 option(p)
65 }
66
67 return credentials.NewCredentials(p)
68}
69
70// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping
71// the EC2RoleProvider. Takes a EC2Metadata client to use when connecting to EC2
72// metadata service.
73func NewCredentialsWithClient(client *ec2metadata.EC2Metadata, options ...func(*EC2RoleProvider)) *credentials.Credentials {
74 p := &EC2RoleProvider{
75 Client: client,
76 }
77
78 for _, option := range options {
79 option(p)
80 }
81
82 return credentials.NewCredentials(p)
83}
84
85// Retrieve retrieves credentials from the EC2 service.
86// An error will be returned if the request fails, or if it is unable to extract
87// the desired credentials.
88func (m *EC2RoleProvider) Retrieve() (credentials.Value, error) {
89 credsList, err := requestCredList(m.Client)
90 if err != nil {
91 return credentials.Value{ProviderName: ProviderName}, err
92 }
93
94 if len(credsList) == 0 {
95 return credentials.Value{ProviderName: ProviderName}, awserr.New("EmptyEC2RoleList", "empty EC2 Role list", nil)
96 }
97 credsName := credsList[0]
98
99 roleCreds, err := requestCred(m.Client, credsName)
100 if err != nil {
101 return credentials.Value{ProviderName: ProviderName}, err
102 }
103
104 m.SetExpiration(roleCreds.Expiration, m.ExpiryWindow)
105
106 return credentials.Value{
107 AccessKeyID: roleCreds.AccessKeyID,
108 SecretAccessKey: roleCreds.SecretAccessKey,
109 SessionToken: roleCreds.Token,
110 ProviderName: ProviderName,
111 }, nil
112}
113
114// An ec2RoleCredRespBody provides the shape for unmarshaling credential
115// request responses.
116type ec2RoleCredRespBody struct {
117 // Success State
118 Expiration time.Time
119 AccessKeyID string
120 SecretAccessKey string
121 Token string
122
123 // Error state
124 Code string
125 Message string
126}
127
128const iamSecurityCredsPath = "/iam/security-credentials"
129
130// requestCredList requests a list of credentials from the EC2 service. An error
131// is returned if there are no credentials, or the request fails.
132func requestCredList(client *ec2metadata.EC2Metadata) ([]string, error) {
133 resp, err := client.GetMetadata(iamSecurityCredsPath)
134 if err != nil {
135 return nil, awserr.New("EC2RoleRequestError", "no EC2 instance role found", err)
136 }
137
138 credsList := []string{}
139 s := bufio.NewScanner(strings.NewReader(resp))
140 for s.Scan() {
141 credsList = append(credsList, s.Text())
142 }
143
144 if err := s.Err(); err != nil {
145 return nil, awserr.New("SerializationError", "failed to read EC2 instance role from metadata service", err)
146 }
147
148 return credsList, nil
149}
150
151// requestCred requests a specific set of credentials by name from the EC2 service.
152//
153// If the credentials cannot be found, or there is an error reading the response,
154// an error will be returned.
155func requestCred(client *ec2metadata.EC2Metadata, credsName string) (ec2RoleCredRespBody, error) {
156 resp, err := client.GetMetadata(path.Join(iamSecurityCredsPath, credsName))
157 if err != nil {
158 return ec2RoleCredRespBody{},
159 awserr.New("EC2RoleRequestError",
160 fmt.Sprintf("failed to get %s EC2 instance role credentials", credsName),
161 err)
162 }
163
164 respCreds := ec2RoleCredRespBody{}
165 if err := json.NewDecoder(strings.NewReader(resp)).Decode(&respCreds); err != nil {
166 return ec2RoleCredRespBody{},
167 awserr.New("SerializationError",
168 fmt.Sprintf("failed to decode %s EC2 instance role credentials", credsName),
169 err)
170 }
171
172 if respCreds.Code != "Success" {
173 // If an error code was returned something failed requesting the role.
174 return ec2RoleCredRespBody{}, awserr.New(respCreds.Code, respCreds.Message, nil)
175 }
176
177 return respCreds, nil
178}
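
A minimal wiring sketch for this provider, assuming a standard session is available; the timeout and expiry-window values are illustrative:

    package main

    import (
        "net/http"
        "time"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
        "github.com/aws/aws-sdk-go/aws/ec2metadata"
        "github.com/aws/aws-sdk-go/aws/session"
    )

    func main() {
        sess := session.Must(session.NewSession())

        // Short metadata-service timeout and a 10s early-expiry window.
        creds := ec2rolecreds.NewCredentialsWithClient(
            ec2metadata.New(sess, &aws.Config{
                HTTPClient: &http.Client{Timeout: 5 * time.Second},
            }),
            func(p *ec2rolecreds.EC2RoleProvider) {
                p.ExpiryWindow = 10 * time.Second
            },
        )
        _ = creds
    }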
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go
new file mode 100644
index 0000000..a4cec5c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go
@@ -0,0 +1,191 @@
1// Package endpointcreds provides support for retrieving credentials from an
2// arbitrary HTTP endpoint.
3//
4// The credentials endpoint Provider can receive both static and refreshable
5// credentials that will expire. Credentials are static when an "Expiration"
6// value is not provided in the endpoint's response.
7//
8// Static credentials will never expire once they have been retrieved. The format
9// of the static credentials response:
10// {
11// "AccessKeyId" : "MUA...",
12// "SecretAccessKey" : "/7PC5om....",
13// }
14//
15// Refreshable credentials will expire within the "ExpiryWindow" of the Expiration
16// value in the response. The format of the refreshable credentials response:
17// {
18// "AccessKeyId" : "MUA...",
19// "SecretAccessKey" : "/7PC5om....",
20// "Token" : "AQoDY....=",
21// "Expiration" : "2016-02-25T06:03:31Z"
22// }
23//
24// Errors should be returned in the following format and only returned with 400
25// or 500 HTTP status codes.
26// {
27// "code": "ErrorCode",
28// "message": "Helpful error message."
29// }
30package endpointcreds
31
32import (
33 "encoding/json"
34 "time"
35
36 "github.com/aws/aws-sdk-go/aws"
37 "github.com/aws/aws-sdk-go/aws/awserr"
38 "github.com/aws/aws-sdk-go/aws/client"
39 "github.com/aws/aws-sdk-go/aws/client/metadata"
40 "github.com/aws/aws-sdk-go/aws/credentials"
41 "github.com/aws/aws-sdk-go/aws/request"
42)
43
44// ProviderName is the name of the credentials provider.
45const ProviderName = `CredentialsEndpointProvider`
46
47// Provider satisfies the credentials.Provider interface, and is a client to
48// retrieve credentials from an arbitrary endpoint.
49type Provider struct {
50 staticCreds bool
51 credentials.Expiry
52
53 // Requires an AWS Client to make HTTP requests to the endpoint with.
54 // The endpoint the request will be made to is provided by the aws.Config's
55 // Endpoint value.
56 Client *client.Client
57
58 // ExpiryWindow will allow the credentials to trigger refreshing prior to
59 // the credentials actually expiring. This is beneficial so race conditions
60 // with expiring credentials do not cause requests to fail unexpectedly
61 // due to ExpiredTokenException exceptions.
62 //
63 // So an ExpiryWindow of 10s would cause calls to IsExpired() to return true
64 // 10 seconds before the credentials are actually expired.
65 //
66 // If ExpiryWindow is 0 or less it will be ignored.
67 ExpiryWindow time.Duration
68}
69
70// NewProviderClient returns a credentials Provider for retrieving AWS credentials
71// from an arbitrary endpoint.
72func NewProviderClient(cfg aws.Config, handlers request.Handlers, endpoint string, options ...func(*Provider)) credentials.Provider {
73 p := &Provider{
74 Client: client.New(
75 cfg,
76 metadata.ClientInfo{
77 ServiceName: "CredentialsEndpoint",
78 Endpoint: endpoint,
79 },
80 handlers,
81 ),
82 }
83
84 p.Client.Handlers.Unmarshal.PushBack(unmarshalHandler)
85 p.Client.Handlers.UnmarshalError.PushBack(unmarshalError)
86 p.Client.Handlers.Validate.Clear()
87 p.Client.Handlers.Validate.PushBack(validateEndpointHandler)
88
89 for _, option := range options {
90 option(p)
91 }
92
93 return p
94}
95
96// NewCredentialsClient returns a Credentials wrapper for retrieving credentials
97// from an arbitrary endpoint concurrently, requesting them only when needed.
98func NewCredentialsClient(cfg aws.Config, handlers request.Handlers, endpoint string, options ...func(*Provider)) *credentials.Credentials {
99 return credentials.NewCredentials(NewProviderClient(cfg, handlers, endpoint, options...))
100}
101
102// IsExpired returns true if the credentials retrieved are expired, or not yet
103// retrieved.
104func (p *Provider) IsExpired() bool {
105 if p.staticCreds {
106 return false
107 }
108 return p.Expiry.IsExpired()
109}
110
111// Retrieve will attempt to request the credentials from the endpoint the Provider
112// was configured for. An error will be returned if the retrieval fails.
113func (p *Provider) Retrieve() (credentials.Value, error) {
114 resp, err := p.getCredentials()
115 if err != nil {
116 return credentials.Value{ProviderName: ProviderName},
117 awserr.New("CredentialsEndpointError", "failed to load credentials", err)
118 }
119
120 if resp.Expiration != nil {
121 p.SetExpiration(*resp.Expiration, p.ExpiryWindow)
122 } else {
123 p.staticCreds = true
124 }
125
126 return credentials.Value{
127 AccessKeyID: resp.AccessKeyID,
128 SecretAccessKey: resp.SecretAccessKey,
129 SessionToken: resp.Token,
130 ProviderName: ProviderName,
131 }, nil
132}
133
134type getCredentialsOutput struct {
135 Expiration *time.Time
136 AccessKeyID string
137 SecretAccessKey string
138 Token string
139}
140
141type errorOutput struct {
142 Code string `json:"code"`
143 Message string `json:"message"`
144}
145
146func (p *Provider) getCredentials() (*getCredentialsOutput, error) {
147 op := &request.Operation{
148 Name: "GetCredentials",
149 HTTPMethod: "GET",
150 }
151
152 out := &getCredentialsOutput{}
153 req := p.Client.NewRequest(op, nil, out)
154 req.HTTPRequest.Header.Set("Accept", "application/json")
155
156 return out, req.Send()
157}
158
159func validateEndpointHandler(r *request.Request) {
160 if len(r.ClientInfo.Endpoint) == 0 {
161 r.Error = aws.ErrMissingEndpoint
162 }
163}
164
165func unmarshalHandler(r *request.Request) {
166 defer r.HTTPResponse.Body.Close()
167
168 out := r.Data.(*getCredentialsOutput)
169 if err := json.NewDecoder(r.HTTPResponse.Body).Decode(&out); err != nil {
170 r.Error = awserr.New("SerializationError",
171 "failed to decode endpoint credentials",
172 err,
173 )
174 }
175}
176
177func unmarshalError(r *request.Request) {
178 defer r.HTTPResponse.Body.Close()
179
180 var errOut errorOutput
181 if err := json.NewDecoder(r.HTTPResponse.Body).Decode(&errOut); err != nil {
182 r.Error = awserr.New("SerializationError",
183 "failed to decode endpoint credentials",
184 err,
185 )
186 }
187
188 // Response body format is not consistent between metadata endpoints.
189 // Grab the error message as a string and include that as the source error
190 r.Error = awserr.New(errOut.Code, errOut.Message, nil)
191}
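
A minimal construction sketch, assuming the SDK's defaults package is used to supply the base Config and Handlers; the URL and expiry window are illustrative only:

    package main

    import (
        "time"

        "github.com/aws/aws-sdk-go/aws/credentials/endpointcreds"
        "github.com/aws/aws-sdk-go/aws/defaults"
    )

    func main() {
        def := defaults.Get()

        // The localhost URL is illustrative; the endpoint must return the
        // JSON document format described in the package comment above.
        creds := endpointcreds.NewCredentialsClient(
            *def.Config, def.Handlers, "http://127.0.0.1:8080/latest/credentials",
            func(p *endpointcreds.Provider) {
                p.ExpiryWindow = 5 * time.Minute
            },
        )
        _ = creds
    }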
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go
new file mode 100644
index 0000000..c14231a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go
@@ -0,0 +1,78 @@
1package credentials
2
3import (
4 "os"
5
6 "github.com/aws/aws-sdk-go/aws/awserr"
7)
8
9// EnvProviderName provides the name of the Env provider
10const EnvProviderName = "EnvProvider"
11
12var (
13 // ErrAccessKeyIDNotFound is returned when the AWS Access Key ID can't be
14 // found in the process's environment.
15 //
16 // @readonly
17 ErrAccessKeyIDNotFound = awserr.New("EnvAccessKeyNotFound", "AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY not found in environment", nil)
18
19 // ErrSecretAccessKeyNotFound is returned when the AWS Secret Access Key
20 // can't be found in the process's environment.
21 //
22 // @readonly
23 ErrSecretAccessKeyNotFound = awserr.New("EnvSecretNotFound", "AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY not found in environment", nil)
24)
25
26// An EnvProvider retrieves credentials from the environment variables of the
27// running process. Environment credentials never expire.
28//
29// Environment variables used:
30//
31// * Access Key ID: AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY
32//
33// * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY
34type EnvProvider struct {
35 retrieved bool
36}
37
38// NewEnvCredentials returns a pointer to a new Credentials object
39// wrapping the environment variable provider.
40func NewEnvCredentials() *Credentials {
41 return NewCredentials(&EnvProvider{})
42}
43
44// Retrieve retrieves the keys from the environment.
45func (e *EnvProvider) Retrieve() (Value, error) {
46 e.retrieved = false
47
48 id := os.Getenv("AWS_ACCESS_KEY_ID")
49 if id == "" {
50 id = os.Getenv("AWS_ACCESS_KEY")
51 }
52
53 secret := os.Getenv("AWS_SECRET_ACCESS_KEY")
54 if secret == "" {
55 secret = os.Getenv("AWS_SECRET_KEY")
56 }
57
58 if id == "" {
59 return Value{ProviderName: EnvProviderName}, ErrAccessKeyIDNotFound
60 }
61
62 if secret == "" {
63 return Value{ProviderName: EnvProviderName}, ErrSecretAccessKeyNotFound
64 }
65
66 e.retrieved = true
67 return Value{
68 AccessKeyID: id,
69 SecretAccessKey: secret,
70 SessionToken: os.Getenv("AWS_SESSION_TOKEN"),
71 ProviderName: EnvProviderName,
72 }, nil
73}
74
75// IsExpired returns if the credentials have been retrieved.
76func (e *EnvProvider) IsExpired() bool {
77 return !e.retrieved
78}
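
A minimal usage sketch; the key values are placeholders set only so the example is self-contained:

    package main

    import (
        "fmt"
        "os"

        "github.com/aws/aws-sdk-go/aws/credentials"
    )

    func main() {
        // Placeholder values, set here only so the example runs anywhere.
        os.Setenv("AWS_ACCESS_KEY_ID", "AKID")
        os.Setenv("AWS_SECRET_ACCESS_KEY", "SECRET")

        creds := credentials.NewEnvCredentials()
        v, err := creds.Get()
        if err != nil {
            fmt.Println("env credentials missing:", err)
            return
        }
        fmt.Println("loaded from:", v.ProviderName) // EnvProvider
    }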
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini b/vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini
new file mode 100644
index 0000000..7fc91d9
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini
@@ -0,0 +1,12 @@
1[default]
2aws_access_key_id = accessKey
3aws_secret_access_key = secret
4aws_session_token = token
5
6[no_token]
7aws_access_key_id = accessKey
8aws_secret_access_key = secret
9
10[with_colon]
11aws_access_key_id: accessKey
12aws_secret_access_key: secret
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go
new file mode 100644
index 0000000..7fb7cbf
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go
@@ -0,0 +1,151 @@
1package credentials
2
3import (
4 "fmt"
5 "os"
6 "path/filepath"
7
8 "github.com/go-ini/ini"
9
10 "github.com/aws/aws-sdk-go/aws/awserr"
11)
12
13// SharedCredsProviderName provides the name of the SharedCreds provider
14const SharedCredsProviderName = "SharedCredentialsProvider"
15
16var (
17 // ErrSharedCredentialsHomeNotFound is emitted when the user directory cannot be found.
18 //
19 // @readonly
20 ErrSharedCredentialsHomeNotFound = awserr.New("UserHomeNotFound", "user home directory not found.", nil)
21)
22
23// A SharedCredentialsProvider retrieves credentials from the current user's home
24// directory, and keeps track of whether those credentials are expired.
25//
26// Profile ini file example: $HOME/.aws/credentials
27type SharedCredentialsProvider struct {
28 // Path to the shared credentials file.
29 //
30 // If empty will look for "AWS_SHARED_CREDENTIALS_FILE" env variable. If the
31 // env value is empty will default to current user's home directory.
32 // Linux/OSX: "$HOME/.aws/credentials"
33 // Windows: "%USERPROFILE%\.aws\credentials"
34 Filename string
35
36 // AWS Profile to extract credentials from the shared credentials file. If empty
37 // will default to environment variable "AWS_PROFILE" or "default" if
38 // environment variable is also not set.
39 Profile string
40
41 // retrieved states if the credentials have been successfully retrieved.
42 retrieved bool
43}
44
45// NewSharedCredentials returns a pointer to a new Credentials object
46// wrapping the Profile file provider.
47func NewSharedCredentials(filename, profile string) *Credentials {
48 return NewCredentials(&SharedCredentialsProvider{
49 Filename: filename,
50 Profile: profile,
51 })
52}
53
54// Retrieve reads and extracts the shared credentials from the current
55// user's home directory.
56func (p *SharedCredentialsProvider) Retrieve() (Value, error) {
57 p.retrieved = false
58
59 filename, err := p.filename()
60 if err != nil {
61 return Value{ProviderName: SharedCredsProviderName}, err
62 }
63
64 creds, err := loadProfile(filename, p.profile())
65 if err != nil {
66 return Value{ProviderName: SharedCredsProviderName}, err
67 }
68
69 p.retrieved = true
70 return creds, nil
71}
72
73// IsExpired returns if the shared credentials have expired.
74func (p *SharedCredentialsProvider) IsExpired() bool {
75 return !p.retrieved
76}
77
78// loadProfile loads the named profile from the file pointed to by the shared
79// credentials filename. The credentials retrieved from the profile will be
80// returned, or an error if it fails to read from the file or the data is invalid.
81func loadProfile(filename, profile string) (Value, error) {
82 config, err := ini.Load(filename)
83 if err != nil {
84 return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to load shared credentials file", err)
85 }
86 iniProfile, err := config.GetSection(profile)
87 if err != nil {
88 return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to get profile", err)
89 }
90
91 id, err := iniProfile.GetKey("aws_access_key_id")
92 if err != nil {
93 return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsAccessKey",
94 fmt.Sprintf("shared credentials %s in %s did not contain aws_access_key_id", profile, filename),
95 err)
96 }
97
98 secret, err := iniProfile.GetKey("aws_secret_access_key")
99 if err != nil {
100 return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsSecret",
101 fmt.Sprintf("shared credentials %s in %s did not contain aws_secret_access_key", profile, filename),
102 err)
103 }
104
105 // Default to empty string if not found
106 token := iniProfile.Key("aws_session_token")
107
108 return Value{
109 AccessKeyID: id.String(),
110 SecretAccessKey: secret.String(),
111 SessionToken: token.String(),
112 ProviderName: SharedCredsProviderName,
113 }, nil
114}
115
116// filename returns the filename to use to read AWS shared credentials.
117//
118// Will return an error if the user's home directory path cannot be found.
119func (p *SharedCredentialsProvider) filename() (string, error) {
120 if p.Filename == "" {
121 if p.Filename = os.Getenv("AWS_SHARED_CREDENTIALS_FILE"); p.Filename != "" {
122 return p.Filename, nil
123 }
124
125 homeDir := os.Getenv("HOME") // *nix
126 if homeDir == "" { // Windows
127 homeDir = os.Getenv("USERPROFILE")
128 }
129 if homeDir == "" {
130 return "", ErrSharedCredentialsHomeNotFound
131 }
132
133 p.Filename = filepath.Join(homeDir, ".aws", "credentials")
134 }
135
136 return p.Filename, nil
137}
138
139// profile returns the AWS shared credentials profile. If empty will read
140// environment variable "AWS_PROFILE". If that is not set profile will
141// return "default".
142func (p *SharedCredentialsProvider) profile() string {
143 if p.Profile == "" {
144 p.Profile = os.Getenv("AWS_PROFILE")
145 }
146 if p.Profile == "" {
147 p.Profile = "default"
148 }
149
150 return p.Profile
151}
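
A minimal usage sketch reading the example.ini fixture shown above; with empty arguments the provider would instead fall back to AWS_SHARED_CREDENTIALS_FILE and AWS_PROFILE, then to ~/.aws/credentials and the "default" profile:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws/credentials"
    )

    func main() {
        // Explicit file and profile; see example.ini above.
        creds := credentials.NewSharedCredentials("example.ini", "no_token")

        v, err := creds.Get()
        if err != nil {
            fmt.Println("failed to load profile:", err)
            return
        }
        fmt.Println(v.AccessKeyID, v.ProviderName) // accessKey SharedCredentialsProvider
    }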
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go
new file mode 100644
index 0000000..4f5dab3
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go
@@ -0,0 +1,57 @@
1package credentials
2
3import (
4 "github.com/aws/aws-sdk-go/aws/awserr"
5)
6
7// StaticProviderName provides the name of the Static provider
8const StaticProviderName = "StaticProvider"
9
10var (
11 // ErrStaticCredentialsEmpty is emitted when static credentials are empty.
12 //
13 // @readonly
14 ErrStaticCredentialsEmpty = awserr.New("EmptyStaticCreds", "static credentials are empty", nil)
15)
16
17// A StaticProvider is a set of credentials which are set programmatically,
18// and will never expire.
19type StaticProvider struct {
20 Value
21}
22
23// NewStaticCredentials returns a pointer to a new Credentials object
24// wrapping a static credentials value provider.
25func NewStaticCredentials(id, secret, token string) *Credentials {
26 return NewCredentials(&StaticProvider{Value: Value{
27 AccessKeyID: id,
28 SecretAccessKey: secret,
29 SessionToken: token,
30 }})
31}
32
33// NewStaticCredentialsFromCreds returns a pointer to a new Credentials object
34// wrapping the static credentials value provider. Same as NewStaticCredentials,
35// but takes the creds Value instead of individual fields.
36func NewStaticCredentialsFromCreds(creds Value) *Credentials {
37 return NewCredentials(&StaticProvider{Value: creds})
38}
39
40// Retrieve returns the credentials or error if the credentials are invalid.
41func (s *StaticProvider) Retrieve() (Value, error) {
42 if s.AccessKeyID == "" || s.SecretAccessKey == "" {
43 return Value{ProviderName: StaticProviderName}, ErrStaticCredentialsEmpty
44 }
45
46 if len(s.Value.ProviderName) == 0 {
47 s.Value.ProviderName = StaticProviderName
48 }
49 return s.Value, nil
50}
51
52// IsExpired returns if the credentials are expired.
53//
54// For StaticProvider, the credentials never expire.
55func (s *StaticProvider) IsExpired() bool {
56 return false
57}
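
A minimal usage sketch; the key material is a placeholder:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws/credentials"
    )

    func main() {
        // Placeholder key material; static credentials never expire.
        creds := credentials.NewStaticCredentials("AKID", "SECRET", "")

        v, err := creds.Get()
        if err != nil {
            // Returned only when the id or secret is empty (ErrStaticCredentialsEmpty).
            fmt.Println(err)
            return
        }
        fmt.Println(v.ProviderName) // StaticProvider
    }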
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go
new file mode 100644
index 0000000..4108e43
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go
@@ -0,0 +1,298 @@
1/*
2Package stscreds are credential Providers to retrieve STS AWS credentials.
3
4STS provides multiple ways to retrieve credentials which can be used when making
5future AWS service API operation calls.
6
7The SDK will ensure that, per instance of credentials.Credentials, all requests
8to refresh the credentials are synchronized. But the SDK is unable to
9ensure synchronized usage of the AssumeRoleProvider if the value is shared
10between multiple Credentials, Sessions, or service clients.
11
12Assume Role
13
14To assume an IAM role using STS with the SDK you can create a new Credentials
15with the SDK's stscreds package.
16
17 // Initial credentials loaded from SDK's default credential chain. Such as
18 // the environment, shared credentials (~/.aws/credentials), or EC2 Instance
19 // Role. These credentials will be used to make the STS Assume Role API call.
20 sess := session.Must(session.NewSession())
21
22 // Create the credentials from AssumeRoleProvider to assume the role
23 // referenced by the "myRoleARN" ARN.
24 creds := stscreds.NewCredentials(sess, "myRoleArn")
25
26 // Create service client value configured for credentials
27 // from assumed role.
28 svc := s3.New(sess, &aws.Config{Credentials: creds})
29
30Assume Role with static MFA Token
31
32To assume an IAM role with a MFA token you can either specify a MFA token code
33directly or provide a function to prompt the user each time the credentials
34need to refresh the role's credentials. Specifying the TokenCode should be used
35for short lived operations that will not need to be refreshed, and when you do
36not want direct control over how the user provides their MFA token.
37
38With TokenCode the AssumeRoleProvider will not be able to refresh the role's
39credentials.
40
41 // Create the credentials from AssumeRoleProvider to assume the role
42 // referenced by the "myRoleARN" ARN using the MFA token code provided.
43 creds := stscreds.NewCredentials(sess, "myRoleArn", func(p *stscreds.AssumeRoleProvider) {
44 p.SerialNumber = aws.String("myTokenSerialNumber")
45 p.TokenCode = aws.String("00000000")
46 })
47
48 // Create service client value configured for credentials
49 // from assumed role.
50 svc := s3.New(sess, &aws.Config{Credentials: creds})
51
52Assume Role with MFA Token Provider
53
54To assume an IAM role with MFA for longer running tasks where the credentials
55may need to be refreshed, setting the TokenProvider field of AssumeRoleProvider
56will allow the credential provider to prompt for a new MFA token code when the
57role's credentials need to be refreshed.
58
59The StdinTokenProvider function is available to prompt on stdin to retrieve
60the MFA token code from the user. You can also implement custom prompts by
61satisfying the TokenProvider function signature.
62
63Using StdinTokenProvider with multiple AssumeRoleProviders, or Credentials will
64have undesirable results as the StdinTokenProvider will not be synchronized. A
65single Credentials with an AssumeRoleProvider can be shared safely.
66
67 // Create the credentials from AssumeRoleProvider to assume the role
68 // referenced by the "myRoleARN" ARN. Prompting for MFA token from stdin.
69 creds := stscreds.NewCredentials(sess, "myRoleArn", func(p *stscreds.AssumeRoleProvider) {
70 p.SerialNumber = aws.String("myTokenSerialNumber")
71 p.TokenProvider = stscreds.StdinTokenProvider
72 })
73
74 // Create service client value configured for credentials
75 // from assumed role.
76 svc := s3.New(sess, &aws.Config{Credentials: creds})
77
78*/
79package stscreds
80
81import (
82 "fmt"
83 "time"
84
85 "github.com/aws/aws-sdk-go/aws"
86 "github.com/aws/aws-sdk-go/aws/awserr"
87 "github.com/aws/aws-sdk-go/aws/client"
88 "github.com/aws/aws-sdk-go/aws/credentials"
89 "github.com/aws/aws-sdk-go/service/sts"
90)
91
92// StdinTokenProvider will prompt on stdout and read from stdin for a string value.
93// An error is returned if reading from stdin fails.
94//
95// Use this function to read MFA tokens from stdin. The function makes no attempt
96// to make atomic prompts from stdin across multiple goroutines.
97//
98// Using StdinTokenProvider with multiple AssumeRoleProviders, or Credentials will
99// have undesirable results as the StdinTokenProvider will not be synchronized. A
100// single Credentials with an AssumeRoleProvider can be shared safely.
101//
102// Will wait forever until something is provided on stdin.
103func StdinTokenProvider() (string, error) {
104 var v string
105 fmt.Printf("Assume Role MFA token code: ")
106 _, err := fmt.Scanln(&v)
107
108 return v, err
109}
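// Example (editor's illustrative sketch): any func() (string, error) satisfies
// the TokenProvider field. This hypothetical variant reads the code from an
// assumed MFA_TOKEN_CODE environment variable instead of prompting on stdin.
//
//    envTokenProvider := func() (string, error) {
//        if v := os.Getenv("MFA_TOKEN_CODE"); v != "" {
//            return v, nil
//        }
//        return "", fmt.Errorf("MFA_TOKEN_CODE is not set")
//    }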
110
111// ProviderName is the name of the AssumeRole provider.
112const ProviderName = "AssumeRoleProvider"
113
114// AssumeRoler represents the minimal subset of the STS client API used by this provider.
115type AssumeRoler interface {
116 AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error)
117}
118
119// DefaultDuration is the default amount of time that the credentials
120// will be valid for.
121var DefaultDuration = time.Duration(15) * time.Minute
122
123// AssumeRoleProvider retrieves temporary credentials from the STS service, and
124// keeps track of their expiration time.
125//
126// This credential provider will be used by the SDK's default credential chain
127// when shared configuration is enabled, and the shared config or shared credentials
128// file configures an assume role. See the Session docs for how to do this.
129//
130// AssumeRoleProvider does not provide any synchronization and it is not safe
131// to share this value across multiple Credentials, Sessions, or service clients
132// without also sharing the same Credentials instance.
133type AssumeRoleProvider struct {
134 credentials.Expiry
135
136 // STS client to make assume role request with.
137 Client AssumeRoler
138
139 // Role to be assumed.
140 RoleARN string
141
142 // Session name, if you wish to reuse the credentials elsewhere.
143 RoleSessionName string
144
145 // Expiry duration of the STS credentials. Defaults to 15 minutes if not set.
146 Duration time.Duration
147
148 // Optional ExternalID to pass along, defaults to nil if not set.
149 ExternalID *string
150
151 // The policy plain text must be 2048 bytes or shorter. However, an internal
152 // conversion compresses it into a packed binary format with a separate limit.
153 // The PackedPolicySize response element indicates by percentage how close to
154 // the upper size limit the policy is, with 100% equaling the maximum allowed
155 // size.
156 Policy *string
157
158 // The identification number of the MFA device that is associated with the user
159 // who is making the AssumeRole call. Specify this value if the trust policy
160 // of the role being assumed includes a condition that requires MFA authentication.
161 // The value is either the serial number for a hardware device (such as GAHT12345678)
162 // or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user).
163 SerialNumber *string
164
165 // The value provided by the MFA device, if the trust policy of the role being
166 // assumed requires MFA (that is, if the policy includes a condition that tests
167 // for MFA). If the role being assumed requires MFA and if the TokenCode value
168 // is missing or expired, the AssumeRole call returns an "access denied" error.
169 //
170 // If SerialNumber is set and neither TokenCode nor TokenProvider are also
171 // set an error will be returned.
172 TokenCode *string
173
174 // Async method of providing MFA token code for assuming an IAM role with MFA.
175 // The value returned by the function will be used as the TokenCode in the Retrieve
176 // call. See StdinTokenProvider for a provider that prompts and reads from stdin.
177 //
178 // This token provider will be called whenever the assumed role's
179 // credentials need to be refreshed when SerialNumber is also set and
180 // TokenCode is not set.
181 //
182 // If both TokenCode and TokenProvider are set, TokenProvider will be used and
183 // TokenCode is ignored.
184 TokenProvider func() (string, error)
185
186 // ExpiryWindow will allow the credentials to trigger refreshing prior to
187 // the credentials actually expiring. This is beneficial so race conditions
188 // with expiring credentials do not cause requests to fail unexpectedly
189 // due to ExpiredTokenException exceptions.
190 //
191 // So an ExpiryWindow of 10s would cause calls to IsExpired() to return true
192 // 10 seconds before the credentials are actually expired.
193 //
194 // If ExpiryWindow is 0 or less it will be ignored.
195 ExpiryWindow time.Duration
196}
197
198// NewCredentials returns a pointer to a new Credentials object wrapping the
199// AssumeRoleProvider. The credentials will expire every 15 minutes and the
200// role will be named after a nanosecond timestamp of this operation.
201//
202// Takes a Config provider to create the STS client. The ConfigProvider is
203// satisfied by the session.Session type.
204//
205// It is safe to share the returned Credentials with multiple Sessions and
206// service clients. All access to the credentials and refreshing them
207// will be synchronized.
208func NewCredentials(c client.ConfigProvider, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials {
209 p := &AssumeRoleProvider{
210 Client: sts.New(c),
211 RoleARN: roleARN,
212 Duration: DefaultDuration,
213 }
214
215 for _, option := range options {
216 option(p)
217 }
218
219 return credentials.NewCredentials(p)
220}
221
222// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping the
223// AssumeRoleProvider. The credentials will expire every 15 minutes and the
224// role will be named after a nanosecond timestamp of this operation.
225//
226// Takes an AssumeRoler which can be satisfied by the STS client.
227//
228// It is safe to share the returned Credentials with multiple Sessions and
229// service clients. All access to the credentials and refreshing them
230// will be synchronized.
231func NewCredentialsWithClient(svc AssumeRoler, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials {
232 p := &AssumeRoleProvider{
233 Client: svc,
234 RoleARN: roleARN,
235 Duration: DefaultDuration,
236 }
237
238 for _, option := range options {
239 option(p)
240 }
241
242 return credentials.NewCredentials(p)
243}
244
245// Retrieve generates a new set of temporary credentials using STS.
246func (p *AssumeRoleProvider) Retrieve() (credentials.Value, error) {
247
248 // Apply defaults where parameters are not set.
249 if p.RoleSessionName == "" {
250 // Try to work out a role name that will hopefully end up unique.
251 p.RoleSessionName = fmt.Sprintf("%d", time.Now().UTC().UnixNano())
252 }
253 if p.Duration == 0 {
254 // Expire as often as AWS permits.
255 p.Duration = DefaultDuration
256 }
257 input := &sts.AssumeRoleInput{
258 DurationSeconds: aws.Int64(int64(p.Duration / time.Second)),
259 RoleArn: aws.String(p.RoleARN),
260 RoleSessionName: aws.String(p.RoleSessionName),
261 ExternalId: p.ExternalID,
262 }
263 if p.Policy != nil {
264 input.Policy = p.Policy
265 }
266 if p.SerialNumber != nil {
267 if p.TokenCode != nil {
268 input.SerialNumber = p.SerialNumber
269 input.TokenCode = p.TokenCode
270 } else if p.TokenProvider != nil {
271 input.SerialNumber = p.SerialNumber
272 code, err := p.TokenProvider()
273 if err != nil {
274 return credentials.Value{ProviderName: ProviderName}, err
275 }
276 input.TokenCode = aws.String(code)
277 } else {
278 return credentials.Value{ProviderName: ProviderName},
279 awserr.New("AssumeRoleTokenNotAvailable",
280 "assume role with MFA enabled, but neither TokenCode nor TokenProvider are set", nil)
281 }
282 }
283
284 roleOutput, err := p.Client.AssumeRole(input)
285 if err != nil {
286 return credentials.Value{ProviderName: ProviderName}, err
287 }
288
289 // We will proactively generate new credentials before they expire.
290 p.SetExpiration(*roleOutput.Credentials.Expiration, p.ExpiryWindow)
291
292 return credentials.Value{
293 AccessKeyID: *roleOutput.Credentials.AccessKeyId,
294 SecretAccessKey: *roleOutput.Credentials.SecretAccessKey,
295 SessionToken: *roleOutput.Credentials.SessionToken,
296 ProviderName: ProviderName,
297 }, nil
298}
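// Example (editor's illustrative sketch): because the provider depends only on
// the AssumeRoler interface, a stub can stand in for the STS client in tests.
// mockSTS, the canned values, and the role ARN below are hypothetical.
//
//    type mockSTS struct{}
//
//    func (mockSTS) AssumeRole(in *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error) {
//        exp := time.Now().Add(15 * time.Minute)
//        return &sts.AssumeRoleOutput{Credentials: &sts.Credentials{
//            AccessKeyId:     aws.String("AKID"),
//            SecretAccessKey: aws.String("SECRET"),
//            SessionToken:    aws.String("TOKEN"),
//            Expiration:      &exp,
//        }}, nil
//    }
//
//    creds := stscreds.NewCredentialsWithClient(mockSTS{}, "arn:aws:iam::123456789012:role/test")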
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go
new file mode 100644
index 0000000..07afe3b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go
@@ -0,0 +1,163 @@
1// Package defaults is a collection of helpers to retrieve the SDK's default
2// configuration and handlers.
3//
4// Generally this package shouldn't be used directly, but session.Session
5// instead. This package is useful when you need to reset the defaults
6// of a session or service client to the SDK defaults before setting
7// additional parameters.
8package defaults
9
10import (
11 "fmt"
12 "net/http"
13 "net/url"
14 "os"
15 "time"
16
17 "github.com/aws/aws-sdk-go/aws"
18 "github.com/aws/aws-sdk-go/aws/awserr"
19 "github.com/aws/aws-sdk-go/aws/corehandlers"
20 "github.com/aws/aws-sdk-go/aws/credentials"
21 "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
22 "github.com/aws/aws-sdk-go/aws/credentials/endpointcreds"
23 "github.com/aws/aws-sdk-go/aws/ec2metadata"
24 "github.com/aws/aws-sdk-go/aws/endpoints"
25 "github.com/aws/aws-sdk-go/aws/request"
26)
27
28// A Defaults provides a collection of default values for SDK clients.
29type Defaults struct {
30 Config *aws.Config
31 Handlers request.Handlers
32}
33
34// Get returns the SDK's default values with Config and handlers pre-configured.
35func Get() Defaults {
36 cfg := Config()
37 handlers := Handlers()
38 cfg.Credentials = CredChain(cfg, handlers)
39
40 return Defaults{
41 Config: cfg,
42 Handlers: handlers,
43 }
44}
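// Example (editor's illustrative sketch): resetting to the SDK defaults before
// layering on overrides. "us-west-2" is a placeholder region.
//
//    def := defaults.Get()
//    cfg := def.Config.WithRegion("us-west-2")
//    handlers := def.Handlers.Copy()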
45
46// Config returns the default configuration without credentials.
47// To retrieve a config with credentials also included use
48// `defaults.Get().Config` instead.
49//
50// Generally you shouldn't need to use this method directly, but it
51// is available if you need to reset the configuration of an
52// existing service client or session.
53func Config() *aws.Config {
54 return aws.NewConfig().
55 WithCredentials(credentials.AnonymousCredentials).
56 WithRegion(os.Getenv("AWS_REGION")).
57 WithHTTPClient(http.DefaultClient).
58 WithMaxRetries(aws.UseServiceDefaultRetries).
59 WithLogger(aws.NewDefaultLogger()).
60 WithLogLevel(aws.LogOff).
61 WithEndpointResolver(endpoints.DefaultResolver())
62}
63
64// Handlers returns the default request handlers.
65//
66// Generally you shouldn't need to use this method directly, but it
67// is available if you need to reset the request handlers of an
68// existing service client or session.
69func Handlers() request.Handlers {
70 var handlers request.Handlers
71
72 handlers.Validate.PushBackNamed(corehandlers.ValidateEndpointHandler)
73 handlers.Validate.AfterEachFn = request.HandlerListStopOnError
74 handlers.Build.PushBackNamed(corehandlers.SDKVersionUserAgentHandler)
75 handlers.Build.AfterEachFn = request.HandlerListStopOnError
76 handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler)
77 handlers.Send.PushBackNamed(corehandlers.ValidateReqSigHandler)
78 handlers.Send.PushBackNamed(corehandlers.SendHandler)
79 handlers.AfterRetry.PushBackNamed(corehandlers.AfterRetryHandler)
80 handlers.ValidateResponse.PushBackNamed(corehandlers.ValidateResponseHandler)
81
82 return handlers
83}
84
85// CredChain returns the default credential chain.
86//
87// Generally you shouldn't need to use this method directly, but it
88// is available if you need to reset the credentials of an
89// existing service client or session's Config.
90func CredChain(cfg *aws.Config, handlers request.Handlers) *credentials.Credentials {
91 return credentials.NewCredentials(&credentials.ChainProvider{
92 VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors),
93 Providers: []credentials.Provider{
94 &credentials.EnvProvider{},
95 &credentials.SharedCredentialsProvider{Filename: "", Profile: ""},
96 RemoteCredProvider(*cfg, handlers),
97 },
98 })
99}
100
101const (
102 httpProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_FULL_URI"
103 ecsCredsProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI"
104)
105
106// RemoteCredProvider returns a credentials provider for the default remote
107// endpoints such as EC2 or ECS Roles.
108func RemoteCredProvider(cfg aws.Config, handlers request.Handlers) credentials.Provider {
109 if u := os.Getenv(httpProviderEnvVar); len(u) > 0 {
110 return localHTTPCredProvider(cfg, handlers, u)
111 }
112
113 if uri := os.Getenv(ecsCredsProviderEnvVar); len(uri) > 0 {
114 u := fmt.Sprintf("http://169.254.170.2%s", uri)
115 return httpCredProvider(cfg, handlers, u)
116 }
117
118 return ec2RoleProvider(cfg, handlers)
119}
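// Example (editor's illustrative sketch): provider selection is driven entirely
// by the environment. The URI value below is a placeholder for what ECS injects.
//
//    os.Setenv("AWS_CONTAINER_CREDENTIALS_RELATIVE_URI", "/v2/credentials/...")
//    p := defaults.RemoteCredProvider(*defaults.Config(), defaults.Handlers())
//    // p now targets http://169.254.170.2/v2/credentials/... rather than the
//    // EC2 instance role endpoint.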
120
121func localHTTPCredProvider(cfg aws.Config, handlers request.Handlers, u string) credentials.Provider {
122 var errMsg string
123
124 parsed, err := url.Parse(u)
125 if err != nil {
126 errMsg = fmt.Sprintf("invalid URL, %v", err)
127 } else if host := aws.URLHostname(parsed); !(host == "localhost" || host == "127.0.0.1") {
128 errMsg = fmt.Sprintf("invalid host address, %q, only localhost and 127.0.0.1 are valid.", host)
129 }
130
131 if len(errMsg) > 0 {
132 if cfg.Logger != nil {
133 cfg.Logger.Log("Ignoring, HTTP credential provider", errMsg, err)
134 }
135 return credentials.ErrorProvider{
136 Err: awserr.New("CredentialsEndpointError", errMsg, err),
137 ProviderName: endpointcreds.ProviderName,
138 }
139 }
140
141 return httpCredProvider(cfg, handlers, u)
142}
143
144func httpCredProvider(cfg aws.Config, handlers request.Handlers, u string) credentials.Provider {
145 return endpointcreds.NewProviderClient(cfg, handlers, u,
146 func(p *endpointcreds.Provider) {
147 p.ExpiryWindow = 5 * time.Minute
148 },
149 )
150}
151
152func ec2RoleProvider(cfg aws.Config, handlers request.Handlers) credentials.Provider {
153 resolver := cfg.EndpointResolver
154 if resolver == nil {
155 resolver = endpoints.DefaultResolver()
156 }
157
158 e, _ := resolver.EndpointFor(endpoints.Ec2metadataServiceID, "")
159 return &ec2rolecreds.EC2RoleProvider{
160 Client: ec2metadata.NewClient(cfg, handlers, e.URL, e.SigningRegion),
161 ExpiryWindow: 5 * time.Minute,
162 }
163}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/doc.go
new file mode 100644
index 0000000..4fcb616
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/doc.go
@@ -0,0 +1,56 @@
1// Package aws provides the core SDK's utilities and shared types. Use this package's
2// utilities to simplify setting and reading API operations parameters.
3//
4// Value and Pointer Conversion Utilities
5//
6// This package includes a helper conversion utility for each scalar type the SDK's
7// APIs use. These utilities make getting a pointer to a scalar, and dereferencing
8// a pointer, easier.
9//
10// Each conversion utility comes in two forms: Value to Pointer and Pointer to Value.
11// The Pointer to Value form will safely dereference the pointer and return its value.
12// If the pointer was nil, the scalar's zero value will be returned.
13//
14// The value to pointer functions will be named after the scalar type. So to get a
15// *string from a string value, use the "String" function. This makes it easy
16// to get a pointer to a literal string value, because getting the address of a
17// literal requires assigning the value to a variable first.
18//
19// var strPtr *string
20//
21// // Without the SDK's conversion functions
22// str := "my string"
23// strPtr = &str
24//
25// // With the SDK's conversion functions
26// strPtr = aws.String("my string")
27//
28// // Convert *string to string value
29// str = aws.StringValue(strPtr)
30//
31// In addition to scalars, the aws package also includes conversion utilities for
32// map and slice types commonly used in API parameters. The map and slice
33// conversion functions use a naming pattern similar to the scalar conversion
34// functions.
35//
36// var strPtrs []*string
37// var strs []string = []string{"Go", "Gophers", "Go"}
38//
39// // Convert []string to []*string
40// strPtrs = aws.StringSlice(strs)
41//
42// // Convert []*string to []string
43// strs = aws.StringValueSlice(strPtrs)
44//
45// SDK Default HTTP Client
46//
47// The SDK will use the http.DefaultClient if an HTTP client is not provided to
48// the SDK's Session, or service client constructor. This means that if the
49// http.DefaultClient is modified by other components of your application the
50// modifications will be picked up by the SDK as well.
51//
52// In some cases this might be intended, but it is a better practice to create
53// a custom HTTP Client to share explicitly through your application. You can
54// configure the SDK to use the custom HTTP Client by setting the HTTPClient
55// value of the SDK's Config type when creating a Session or service client.
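//
// A minimal sketch (editorial addition, assuming the SDK's session package) of
// sharing an explicit HTTP client:
//
//    httpClient := &http.Client{Timeout: 30 * time.Second}
//    sess := session.Must(session.NewSession(&aws.Config{
//        HTTPClient: httpClient,
//    }))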
56package aws
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go
new file mode 100644
index 0000000..984407a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go
@@ -0,0 +1,162 @@
1package ec2metadata
2
3import (
4 "encoding/json"
5 "fmt"
6 "net/http"
7 "path"
8 "strings"
9 "time"
10
11 "github.com/aws/aws-sdk-go/aws/awserr"
12 "github.com/aws/aws-sdk-go/aws/request"
13)
14
15// GetMetadata uses the path provided to request information from the EC2
16// instance metadata service. The content will be returned as a string, or an
17// error if the request failed.
18func (c *EC2Metadata) GetMetadata(p string) (string, error) {
19 op := &request.Operation{
20 Name: "GetMetadata",
21 HTTPMethod: "GET",
22 HTTPPath: path.Join("/", "meta-data", p),
23 }
24
25 output := &metadataOutput{}
26 req := c.NewRequest(op, nil, output)
27
28 return output.Content, req.Send()
29}
30
31// GetUserData returns the userdata that was configured for the EC2 instance.
32// If there is no user-data set up for the instance, a "NotFoundError" error
33// code will be returned.
34func (c *EC2Metadata) GetUserData() (string, error) {
35 op := &request.Operation{
36 Name: "GetUserData",
37 HTTPMethod: "GET",
38 HTTPPath: path.Join("/", "user-data"),
39 }
40
41 output := &metadataOutput{}
42 req := c.NewRequest(op, nil, output)
43 req.Handlers.UnmarshalError.PushBack(func(r *request.Request) {
44 if r.HTTPResponse.StatusCode == http.StatusNotFound {
45 r.Error = awserr.New("NotFoundError", "user-data not found", r.Error)
46 }
47 })
48
49 return output.Content, req.Send()
50}
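// Example (editor's illustrative sketch): distinguishing "no user-data" from
// other failures via the error code installed by the handler above. svc is
// assumed to be an *EC2Metadata client.
//
//    data, err := svc.GetUserData()
//    if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "NotFoundError" {
//        // The instance simply has no user-data configured; data is empty.
//    }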
51
52// GetDynamicData uses the path provided to request information from the EC2
53// instance metadata service for dynamic data. The content will be returned
54// as a string, or error if the request failed.
55func (c *EC2Metadata) GetDynamicData(p string) (string, error) {
56 op := &request.Operation{
57 Name: "GetDynamicData",
58 HTTPMethod: "GET",
59 HTTPPath: path.Join("/", "dynamic", p),
60 }
61
62 output := &metadataOutput{}
63 req := c.NewRequest(op, nil, output)
64
65 return output.Content, req.Send()
66}
67
68// GetInstanceIdentityDocument retrieves an identity document describing an
69// instance. Error is returned if the request fails or is unable to parse
70// the response.
71func (c *EC2Metadata) GetInstanceIdentityDocument() (EC2InstanceIdentityDocument, error) {
72 resp, err := c.GetDynamicData("instance-identity/document")
73 if err != nil {
74 return EC2InstanceIdentityDocument{},
75 awserr.New("EC2MetadataRequestError",
76 "failed to get EC2 instance identity document", err)
77 }
78
79 doc := EC2InstanceIdentityDocument{}
80 if err := json.NewDecoder(strings.NewReader(resp)).Decode(&doc); err != nil {
81 return EC2InstanceIdentityDocument{},
82 awserr.New("SerializationError",
83 "failed to decode EC2 instance identity document", err)
84 }
85
86 return doc, nil
87}
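// Example (editor's illustrative sketch): the decoded document carries instance
// facts such as the account, region, and instance ID. svc is assumed to be an
// *EC2Metadata client.
//
//    doc, err := svc.GetInstanceIdentityDocument()
//    if err == nil {
//        fmt.Println(doc.AccountID, doc.Region, doc.InstanceID)
//    }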
88
89// IAMInfo retrieves IAM info from the metadata API
90func (c *EC2Metadata) IAMInfo() (EC2IAMInfo, error) {
91 resp, err := c.GetMetadata("iam/info")
92 if err != nil {
93 return EC2IAMInfo{},
94 awserr.New("EC2MetadataRequestError",
95 "failed to get EC2 IAM info", err)
96 }
97
98 info := EC2IAMInfo{}
99 if err := json.NewDecoder(strings.NewReader(resp)).Decode(&info); err != nil {
100 return EC2IAMInfo{},
101 awserr.New("SerializationError",
102 "failed to decode EC2 IAM info", err)
103 }
104
105 if info.Code != "Success" {
106 errMsg := fmt.Sprintf("failed to get EC2 IAM Info (%s)", info.Code)
107 return EC2IAMInfo{},
108 awserr.New("EC2MetadataError", errMsg, nil)
109 }
110
111 return info, nil
112}
113
114// Region returns the region the instance is running in.
115func (c *EC2Metadata) Region() (string, error) {
116 resp, err := c.GetMetadata("placement/availability-zone")
117 if err != nil {
118 return "", err
119 }
120
121 // returns region without the suffix. Eg: us-west-2a becomes us-west-2
122 return resp[:len(resp)-1], nil
123}
124
125// Available returns whether the application has access to the EC2 Metadata service.
126// Can be used to determine if the application is running within an EC2 Instance and
127// the metadata service is available.
128func (c *EC2Metadata) Available() bool {
129 if _, err := c.GetMetadata("instance-id"); err != nil {
130 return false
131 }
132
133 return true
134}
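// Example (editor's illustrative sketch): probing the metadata service before
// depending on it, then deriving the region. cfg is an assumed *aws.Config.
//
//    if svc.Available() {
//        if region, err := svc.Region(); err == nil {
//            cfg = cfg.WithRegion(region)
//        }
//    }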
135
136// An EC2IAMInfo provides the shape for unmarshaling
137// an IAM info from the metadata API
138type EC2IAMInfo struct {
139 Code string
140 LastUpdated time.Time
141 InstanceProfileArn string
142 InstanceProfileID string
143}
144
145// An EC2InstanceIdentityDocument provides the shape for unmarshaling
146// an instance identity document
147type EC2InstanceIdentityDocument struct {
148 DevpayProductCodes []string `json:"devpayProductCodes"`
149 AvailabilityZone string `json:"availabilityZone"`
150 PrivateIP string `json:"privateIp"`
151 Version string `json:"version"`
152 Region string `json:"region"`
153 InstanceID string `json:"instanceId"`
154 BillingProducts []string `json:"billingProducts"`
155 InstanceType string `json:"instanceType"`
156 AccountID string `json:"accountId"`
157 PendingTime time.Time `json:"pendingTime"`
158 ImageID string `json:"imageId"`
159 KernelID string `json:"kernelId"`
160 RamdiskID string `json:"ramdiskId"`
161 Architecture string `json:"architecture"`
162}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go
new file mode 100644
index 0000000..5b4379d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go
@@ -0,0 +1,124 @@
1// Package ec2metadata provides the client for making API calls to the
2// EC2 Metadata service.
3package ec2metadata
4
5import (
6 "bytes"
7 "errors"
8 "io"
9 "net/http"
10 "time"
11
12 "github.com/aws/aws-sdk-go/aws"
13 "github.com/aws/aws-sdk-go/aws/awserr"
14 "github.com/aws/aws-sdk-go/aws/client"
15 "github.com/aws/aws-sdk-go/aws/client/metadata"
16 "github.com/aws/aws-sdk-go/aws/request"
17)
18
19// ServiceName is the name of the service.
20const ServiceName = "ec2metadata"
21
22// A EC2Metadata is an EC2 Metadata service Client.
23type EC2Metadata struct {
24 *client.Client
25}
26
27// New creates a new instance of the EC2Metadata client with a session.
28// This client is safe to use across multiple goroutines.
29//
30//
31// Example:
32// // Create a EC2Metadata client from just a session.
33// svc := ec2metadata.New(mySession)
34//
35// // Create a EC2Metadata client with additional configuration
36// svc := ec2metadata.New(mySession, aws.NewConfig().WithLogLevel(aws.LogDebugHTTPBody))
37func New(p client.ConfigProvider, cfgs ...*aws.Config) *EC2Metadata {
38 c := p.ClientConfig(ServiceName, cfgs...)
39 return NewClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
40}
41
42// NewClient returns a new EC2Metadata client. Should be used to create
43// a client when not using a session. Generally using just New with a session
44// is preferred.
45//
46// If an unmodified HTTP client from the stdlib default is provided, or no client
47// at all, the EC2Metadata client's HTTP timeout will be shortened.
48// To disable this behavior, set Config.EC2MetadataDisableTimeoutOverride to true. The override is enabled by default.
49func NewClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string, opts ...func(*client.Client)) *EC2Metadata {
50 if !aws.BoolValue(cfg.EC2MetadataDisableTimeoutOverride) && httpClientZero(cfg.HTTPClient) {
51 // If the http client is unmodified and this feature is not disabled
52 // set custom timeouts for EC2Metadata requests.
53 cfg.HTTPClient = &http.Client{
54 // use a shorter timeout than default because the metadata
55 // service is local if it is running, and to fail faster
56 // if not running on an ec2 instance.
57 Timeout: 5 * time.Second,
58 }
59 }
60
61 svc := &EC2Metadata{
62 Client: client.New(
63 cfg,
64 metadata.ClientInfo{
65 ServiceName: ServiceName,
66 Endpoint: endpoint,
67 APIVersion: "latest",
68 },
69 handlers,
70 ),
71 }
72
73 svc.Handlers.Unmarshal.PushBack(unmarshalHandler)
74 svc.Handlers.UnmarshalError.PushBack(unmarshalError)
75 svc.Handlers.Validate.Clear()
76 svc.Handlers.Validate.PushBack(validateEndpointHandler)
77
78 // Add additional options to the service config
79 for _, option := range opts {
80 option(svc.Client)
81 }
82
83 return svc
84}
85
86func httpClientZero(c *http.Client) bool {
87 return c == nil || (c.Transport == nil && c.CheckRedirect == nil && c.Jar == nil && c.Timeout == 0)
88}
89
90type metadataOutput struct {
91 Content string
92}
93
94func unmarshalHandler(r *request.Request) {
95 defer r.HTTPResponse.Body.Close()
96 b := &bytes.Buffer{}
97 if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil {
98 r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata response", err)
99 return
100 }
101
102 if data, ok := r.Data.(*metadataOutput); ok {
103 data.Content = b.String()
104 }
105}
106
107func unmarshalError(r *request.Request) {
108 defer r.HTTPResponse.Body.Close()
109 b := &bytes.Buffer{}
110 if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil {
111 r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata error response", err)
112 return
113 }
114
115 // Response body format is not consistent between metadata endpoints.
116 // Grab the error message as a string and include that as the source error
117 r.Error = awserr.New("EC2MetadataError", "failed to make EC2Metadata request", errors.New(b.String()))
118}
119
120func validateEndpointHandler(r *request.Request) {
121 if r.ClientInfo.Endpoint == "" {
122 r.Error = aws.ErrMissingEndpoint
123 }
124}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go
new file mode 100644
index 0000000..74f72de
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go
@@ -0,0 +1,133 @@
1package endpoints
2
3import (
4 "encoding/json"
5 "fmt"
6 "io"
7
8 "github.com/aws/aws-sdk-go/aws/awserr"
9)
10
11type modelDefinition map[string]json.RawMessage
12
13// DecodeModelOptions provides the options for how the endpoints model definition
14// is decoded.
15type DecodeModelOptions struct {
16 SkipCustomizations bool
17}
18
19// Set combines all of the option functions together.
20func (d *DecodeModelOptions) Set(optFns ...func(*DecodeModelOptions)) {
21 for _, fn := range optFns {
22 fn(d)
23 }
24}
25
26// DecodeModel unmarshals a Regions and Endpoint model definition file into
27// a endpoint Resolver. If the file format is not supported, or an error occurs
28// when unmarshaling the model an error will be returned.
29//
30// Casting the return value of this func to an EnumPartitions will
31// allow you to get a list of the partitions in the order the endpoints
32// will be resolved in.
33//
34// resolver, err := endpoints.DecodeModel(reader)
35//
36// partitions := resolver.(endpoints.EnumPartitions).Partitions()
37// for _, p := range partitions {
38// // ... inspect partitions
39// }
40func DecodeModel(r io.Reader, optFns ...func(*DecodeModelOptions)) (Resolver, error) {
41 var opts DecodeModelOptions
42 opts.Set(optFns...)
43
44 // Get the version of the partition file to determine what
45 // unmarshaling model to use.
46 modelDef := modelDefinition{}
47 if err := json.NewDecoder(r).Decode(&modelDef); err != nil {
48 return nil, newDecodeModelError("failed to decode endpoints model", err)
49 }
50
51 var version string
52 if b, ok := modelDef["version"]; ok {
53 version = string(b)
54 } else {
55 return nil, newDecodeModelError("endpoints version not found in model", nil)
56 }
57
58 if version == "3" {
59 return decodeV3Endpoints(modelDef, opts)
60 }
61
62 return nil, newDecodeModelError(
63 fmt.Sprintf("endpoints version %s, not supported", version), nil)
64}
65
66func decodeV3Endpoints(modelDef modelDefinition, opts DecodeModelOptions) (Resolver, error) {
67 b, ok := modelDef["partitions"]
68 if !ok {
69 return nil, newDecodeModelError("endpoints model missing partitions", nil)
70 }
71
72 ps := partitions{}
73 if err := json.Unmarshal(b, &ps); err != nil {
74 return nil, newDecodeModelError("failed to decode endpoints model", err)
75 }
76
77 if opts.SkipCustomizations {
78 return ps, nil
79 }
80
81 // Customization
82 for i := 0; i < len(ps); i++ {
83 p := &ps[i]
84 custAddEC2Metadata(p)
85 custAddS3DualStack(p)
86 custRmIotDataService(p)
87 }
88
89 return ps, nil
90}
91
92func custAddS3DualStack(p *partition) {
93 if p.ID != "aws" {
94 return
95 }
96
97 s, ok := p.Services["s3"]
98 if !ok {
99 return
100 }
101
102 s.Defaults.HasDualStack = boxedTrue
103 s.Defaults.DualStackHostname = "{service}.dualstack.{region}.{dnsSuffix}"
104
105 p.Services["s3"] = s
106}
107
108func custAddEC2Metadata(p *partition) {
109 p.Services["ec2metadata"] = service{
110 IsRegionalized: boxedFalse,
111 PartitionEndpoint: "aws-global",
112 Endpoints: endpoints{
113 "aws-global": endpoint{
114 Hostname: "169.254.169.254/latest",
115 Protocols: []string{"http"},
116 },
117 },
118 }
119}
120
121func custRmIotDataService(p *partition) {
122 delete(p.Services, "data.iot")
123}
124
125type decodeModelError struct {
126 awsError
127}
128
129func newDecodeModelError(msg string, err error) decodeModelError {
130 return decodeModelError{
131 awsError: awserr.New("DecodeEndpointsModelError", msg, err),
132 }
133}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
new file mode 100644
index 0000000..e6d7ede
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
@@ -0,0 +1,2174 @@
1// Code generated by aws/endpoints/v3model_codegen.go. DO NOT EDIT.
2
3package endpoints
4
5import (
6 "regexp"
7)
8
9// Partition identifiers
10const (
11 AwsPartitionID = "aws" // AWS Standard partition.
12 AwsCnPartitionID = "aws-cn" // AWS China partition.
13 AwsUsGovPartitionID = "aws-us-gov" // AWS GovCloud (US) partition.
14)
15
16// AWS Standard partition's regions.
17const (
18 ApNortheast1RegionID = "ap-northeast-1" // Asia Pacific (Tokyo).
19 ApNortheast2RegionID = "ap-northeast-2" // Asia Pacific (Seoul).
20 ApSouth1RegionID = "ap-south-1" // Asia Pacific (Mumbai).
21 ApSoutheast1RegionID = "ap-southeast-1" // Asia Pacific (Singapore).
22 ApSoutheast2RegionID = "ap-southeast-2" // Asia Pacific (Sydney).
23 CaCentral1RegionID = "ca-central-1" // Canada (Central).
24 EuCentral1RegionID = "eu-central-1" // EU (Frankfurt).
25 EuWest1RegionID = "eu-west-1" // EU (Ireland).
26 EuWest2RegionID = "eu-west-2" // EU (London).
27 SaEast1RegionID = "sa-east-1" // South America (Sao Paulo).
28 UsEast1RegionID = "us-east-1" // US East (N. Virginia).
29 UsEast2RegionID = "us-east-2" // US East (Ohio).
30 UsWest1RegionID = "us-west-1" // US West (N. California).
31 UsWest2RegionID = "us-west-2" // US West (Oregon).
32)
33
34// AWS China partition's regions.
35const (
36 CnNorth1RegionID = "cn-north-1" // China (Beijing).
37)
38
39// AWS GovCloud (US) partition's regions.
40const (
41 UsGovWest1RegionID = "us-gov-west-1" // AWS GovCloud (US).
42)
43
44// Service identifiers
45const (
46 AcmServiceID = "acm" // Acm.
47 ApigatewayServiceID = "apigateway" // Apigateway.
48 ApplicationAutoscalingServiceID = "application-autoscaling" // ApplicationAutoscaling.
49 Appstream2ServiceID = "appstream2" // Appstream2.
50 AutoscalingServiceID = "autoscaling" // Autoscaling.
51 BatchServiceID = "batch" // Batch.
52 BudgetsServiceID = "budgets" // Budgets.
53 ClouddirectoryServiceID = "clouddirectory" // Clouddirectory.
54 CloudformationServiceID = "cloudformation" // Cloudformation.
55 CloudfrontServiceID = "cloudfront" // Cloudfront.
56 CloudhsmServiceID = "cloudhsm" // Cloudhsm.
57 CloudsearchServiceID = "cloudsearch" // Cloudsearch.
58 CloudtrailServiceID = "cloudtrail" // Cloudtrail.
59 CodebuildServiceID = "codebuild" // Codebuild.
60 CodecommitServiceID = "codecommit" // Codecommit.
61 CodedeployServiceID = "codedeploy" // Codedeploy.
62 CodepipelineServiceID = "codepipeline" // Codepipeline.
63 CodestarServiceID = "codestar" // Codestar.
64 CognitoIdentityServiceID = "cognito-identity" // CognitoIdentity.
65 CognitoIdpServiceID = "cognito-idp" // CognitoIdp.
66 CognitoSyncServiceID = "cognito-sync" // CognitoSync.
67 ConfigServiceID = "config" // Config.
68 CurServiceID = "cur" // Cur.
69 DatapipelineServiceID = "datapipeline" // Datapipeline.
70 DevicefarmServiceID = "devicefarm" // Devicefarm.
71 DirectconnectServiceID = "directconnect" // Directconnect.
72 DiscoveryServiceID = "discovery" // Discovery.
73 DmsServiceID = "dms" // Dms.
74 DsServiceID = "ds" // Ds.
75 DynamodbServiceID = "dynamodb" // Dynamodb.
76 Ec2ServiceID = "ec2" // Ec2.
77 Ec2metadataServiceID = "ec2metadata" // Ec2metadata.
78 EcrServiceID = "ecr" // Ecr.
79 EcsServiceID = "ecs" // Ecs.
80 ElasticacheServiceID = "elasticache" // Elasticache.
81 ElasticbeanstalkServiceID = "elasticbeanstalk" // Elasticbeanstalk.
82 ElasticfilesystemServiceID = "elasticfilesystem" // Elasticfilesystem.
83 ElasticloadbalancingServiceID = "elasticloadbalancing" // Elasticloadbalancing.
84 ElasticmapreduceServiceID = "elasticmapreduce" // Elasticmapreduce.
85 ElastictranscoderServiceID = "elastictranscoder" // Elastictranscoder.
86 EmailServiceID = "email" // Email.
87 EntitlementMarketplaceServiceID = "entitlement.marketplace" // EntitlementMarketplace.
88 EsServiceID = "es" // Es.
89 EventsServiceID = "events" // Events.
90 FirehoseServiceID = "firehose" // Firehose.
91 GameliftServiceID = "gamelift" // Gamelift.
92 GlacierServiceID = "glacier" // Glacier.
93 HealthServiceID = "health" // Health.
94 IamServiceID = "iam" // Iam.
95 ImportexportServiceID = "importexport" // Importexport.
96 InspectorServiceID = "inspector" // Inspector.
97 IotServiceID = "iot" // Iot.
98 KinesisServiceID = "kinesis" // Kinesis.
99 KinesisanalyticsServiceID = "kinesisanalytics" // Kinesisanalytics.
100 KmsServiceID = "kms" // Kms.
101 LambdaServiceID = "lambda" // Lambda.
102 LightsailServiceID = "lightsail" // Lightsail.
103 LogsServiceID = "logs" // Logs.
104 MachinelearningServiceID = "machinelearning" // Machinelearning.
105 MarketplacecommerceanalyticsServiceID = "marketplacecommerceanalytics" // Marketplacecommerceanalytics.
106 MeteringMarketplaceServiceID = "metering.marketplace" // MeteringMarketplace.
107 MobileanalyticsServiceID = "mobileanalytics" // Mobileanalytics.
108 ModelsLexServiceID = "models.lex" // ModelsLex.
109 MonitoringServiceID = "monitoring" // Monitoring.
110 MturkRequesterServiceID = "mturk-requester" // MturkRequester.
111 OpsworksServiceID = "opsworks" // Opsworks.
112 OpsworksCmServiceID = "opsworks-cm" // OpsworksCm.
113 OrganizationsServiceID = "organizations" // Organizations.
114 PinpointServiceID = "pinpoint" // Pinpoint.
115 PollyServiceID = "polly" // Polly.
116 RdsServiceID = "rds" // Rds.
117 RedshiftServiceID = "redshift" // Redshift.
118 RekognitionServiceID = "rekognition" // Rekognition.
119 Route53ServiceID = "route53" // Route53.
120 Route53domainsServiceID = "route53domains" // Route53domains.
121 RuntimeLexServiceID = "runtime.lex" // RuntimeLex.
122 S3ServiceID = "s3" // S3.
123 SdbServiceID = "sdb" // Sdb.
124 ServicecatalogServiceID = "servicecatalog" // Servicecatalog.
125 ShieldServiceID = "shield" // Shield.
126 SmsServiceID = "sms" // Sms.
127 SnowballServiceID = "snowball" // Snowball.
128 SnsServiceID = "sns" // Sns.
129 SqsServiceID = "sqs" // Sqs.
130 SsmServiceID = "ssm" // Ssm.
131 StatesServiceID = "states" // States.
132 StoragegatewayServiceID = "storagegateway" // Storagegateway.
133 StreamsDynamodbServiceID = "streams.dynamodb" // StreamsDynamodb.
134 StsServiceID = "sts" // Sts.
135 SupportServiceID = "support" // Support.
136 SwfServiceID = "swf" // Swf.
137 TaggingServiceID = "tagging" // Tagging.
138 WafServiceID = "waf" // Waf.
139 WafRegionalServiceID = "waf-regional" // WafRegional.
140 WorkdocsServiceID = "workdocs" // Workdocs.
141 WorkspacesServiceID = "workspaces" // Workspaces.
142 XrayServiceID = "xray" // Xray.
143)
144
145// DefaultResolver returns an Endpoint resolver that will be able
146// to resolve endpoints for: AWS Standard, AWS China, and AWS GovCloud (US).
147//
148// Use DefaultPartitions() to get the list of the default partitions.
149func DefaultResolver() Resolver {
150 return defaultPartitions
151}
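// Example (editor's illustrative sketch): resolving a concrete endpoint through
// the default resolver. "s3" and "us-west-2" are placeholder inputs; EndpointFor
// is part of the Resolver interface.
//
//    e, err := endpoints.DefaultResolver().EndpointFor("s3", "us-west-2")
//    if err == nil {
//        fmt.Println(e.URL, e.SigningRegion)
//    }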
152
153// DefaultPartitions returns a list of the partitions the SDK is bundled
154// with. The available partitions are: AWS Standard, AWS China, and AWS GovCloud (US).
155//
156// partitions := endpoints.DefaultPartitions()
157// for _, p := range partitions {
158// // ... inspect partitions
159// }
160func DefaultPartitions() []Partition {
161 return defaultPartitions.Partitions()
162}
163
164var defaultPartitions = partitions{
165 awsPartition,
166 awscnPartition,
167 awsusgovPartition,
168}
169
170// AwsPartition returns the Resolver for AWS Standard.
171func AwsPartition() Partition {
172 return awsPartition.Partition()
173}
174
175var awsPartition = partition{
176 ID: "aws",
177 Name: "AWS Standard",
178 DNSSuffix: "amazonaws.com",
179 RegionRegex: regionRegex{
180 Regexp: func() *regexp.Regexp {
181 reg, _ := regexp.Compile("^(us|eu|ap|sa|ca)\\-\\w+\\-\\d+$")
182 return reg
183 }(),
184 },
185 Defaults: endpoint{
186 Hostname: "{service}.{region}.{dnsSuffix}",
187 Protocols: []string{"https"},
188 SignatureVersions: []string{"v4"},
189 },
190 Regions: regions{
191 "ap-northeast-1": region{
192 Description: "Asia Pacific (Tokyo)",
193 },
194 "ap-northeast-2": region{
195 Description: "Asia Pacific (Seoul)",
196 },
197 "ap-south-1": region{
198 Description: "Asia Pacific (Mumbai)",
199 },
200 "ap-southeast-1": region{
201 Description: "Asia Pacific (Singapore)",
202 },
203 "ap-southeast-2": region{
204 Description: "Asia Pacific (Sydney)",
205 },
206 "ca-central-1": region{
207 Description: "Canada (Central)",
208 },
209 "eu-central-1": region{
210 Description: "EU (Frankfurt)",
211 },
212 "eu-west-1": region{
213 Description: "EU (Ireland)",
214 },
215 "eu-west-2": region{
216 Description: "EU (London)",
217 },
218 "sa-east-1": region{
219 Description: "South America (Sao Paulo)",
220 },
221 "us-east-1": region{
222 Description: "US East (N. Virginia)",
223 },
224 "us-east-2": region{
225 Description: "US East (Ohio)",
226 },
227 "us-west-1": region{
228 Description: "US West (N. California)",
229 },
230 "us-west-2": region{
231 Description: "US West (Oregon)",
232 },
233 },
234 Services: services{
235 "acm": service{
236
237 Endpoints: endpoints{
238 "ap-northeast-1": endpoint{},
239 "ap-northeast-2": endpoint{},
240 "ap-south-1": endpoint{},
241 "ap-southeast-1": endpoint{},
242 "ap-southeast-2": endpoint{},
243 "ca-central-1": endpoint{},
244 "eu-central-1": endpoint{},
245 "eu-west-1": endpoint{},
246 "eu-west-2": endpoint{},
247 "sa-east-1": endpoint{},
248 "us-east-1": endpoint{},
249 "us-east-2": endpoint{},
250 "us-west-1": endpoint{},
251 "us-west-2": endpoint{},
252 },
253 },
254 "apigateway": service{
255
256 Endpoints: endpoints{
257 "ap-northeast-1": endpoint{},
258 "ap-northeast-2": endpoint{},
259 "ap-south-1": endpoint{},
260 "ap-southeast-1": endpoint{},
261 "ap-southeast-2": endpoint{},
262 "eu-central-1": endpoint{},
263 "eu-west-1": endpoint{},
264 "eu-west-2": endpoint{},
265 "us-east-1": endpoint{},
266 "us-east-2": endpoint{},
267 "us-west-1": endpoint{},
268 "us-west-2": endpoint{},
269 },
270 },
271 "application-autoscaling": service{
272 Defaults: endpoint{
273 Hostname: "autoscaling.{region}.amazonaws.com",
274 Protocols: []string{"http", "https"},
275 CredentialScope: credentialScope{
276 Service: "application-autoscaling",
277 },
278 },
279 Endpoints: endpoints{
280 "ap-northeast-1": endpoint{},
281 "ap-northeast-2": endpoint{},
282 "ap-south-1": endpoint{},
283 "ap-southeast-1": endpoint{},
284 "ap-southeast-2": endpoint{},
285 "ca-central-1": endpoint{},
286 "eu-central-1": endpoint{},
287 "eu-west-1": endpoint{},
288 "eu-west-2": endpoint{},
289 "sa-east-1": endpoint{},
290 "us-east-1": endpoint{},
291 "us-east-2": endpoint{},
292 "us-west-1": endpoint{},
293 "us-west-2": endpoint{},
294 },
295 },
296 "appstream2": service{
297 Defaults: endpoint{
298 Protocols: []string{"https"},
299 CredentialScope: credentialScope{
300 Service: "appstream",
301 },
302 },
303 Endpoints: endpoints{
304 "ap-northeast-1": endpoint{},
305 "eu-west-1": endpoint{},
306 "us-east-1": endpoint{},
307 "us-west-2": endpoint{},
308 },
309 },
310 "autoscaling": service{
311 Defaults: endpoint{
312 Protocols: []string{"http", "https"},
313 },
314 Endpoints: endpoints{
315 "ap-northeast-1": endpoint{},
316 "ap-northeast-2": endpoint{},
317 "ap-south-1": endpoint{},
318 "ap-southeast-1": endpoint{},
319 "ap-southeast-2": endpoint{},
320 "ca-central-1": endpoint{},
321 "eu-central-1": endpoint{},
322 "eu-west-1": endpoint{},
323 "eu-west-2": endpoint{},
324 "sa-east-1": endpoint{},
325 "us-east-1": endpoint{},
326 "us-east-2": endpoint{},
327 "us-west-1": endpoint{},
328 "us-west-2": endpoint{},
329 },
330 },
331 "batch": service{
332
333 Endpoints: endpoints{
334 "us-east-1": endpoint{},
335 },
336 },
337 "budgets": service{
338 PartitionEndpoint: "aws-global",
339 IsRegionalized: boxedFalse,
340
341 Endpoints: endpoints{
342 "aws-global": endpoint{
343 Hostname: "budgets.amazonaws.com",
344 CredentialScope: credentialScope{
345 Region: "us-east-1",
346 },
347 },
348 },
349 },
350 "clouddirectory": service{
351
352 Endpoints: endpoints{
353 "ap-southeast-1": endpoint{},
354 "ap-southeast-2": endpoint{},
355 "eu-west-1": endpoint{},
356 "eu-west-2": endpoint{},
357 "us-east-1": endpoint{},
358 "us-east-2": endpoint{},
359 "us-west-2": endpoint{},
360 },
361 },
362 "cloudformation": service{
363
364 Endpoints: endpoints{
365 "ap-northeast-1": endpoint{},
366 "ap-northeast-2": endpoint{},
367 "ap-south-1": endpoint{},
368 "ap-southeast-1": endpoint{},
369 "ap-southeast-2": endpoint{},
370 "ca-central-1": endpoint{},
371 "eu-central-1": endpoint{},
372 "eu-west-1": endpoint{},
373 "eu-west-2": endpoint{},
374 "sa-east-1": endpoint{},
375 "us-east-1": endpoint{},
376 "us-east-2": endpoint{},
377 "us-west-1": endpoint{},
378 "us-west-2": endpoint{},
379 },
380 },
381 "cloudfront": service{
382 PartitionEndpoint: "aws-global",
383 IsRegionalized: boxedFalse,
384
385 Endpoints: endpoints{
386 "aws-global": endpoint{
387 Hostname: "cloudfront.amazonaws.com",
388 Protocols: []string{"http", "https"},
389 CredentialScope: credentialScope{
390 Region: "us-east-1",
391 },
392 },
393 },
394 },
395 "cloudhsm": service{
396
397 Endpoints: endpoints{
398 "ap-northeast-1": endpoint{},
399 "ap-southeast-1": endpoint{},
400 "ap-southeast-2": endpoint{},
401 "ca-central-1": endpoint{},
402 "eu-central-1": endpoint{},
403 "eu-west-1": endpoint{},
404 "us-east-1": endpoint{},
405 "us-east-2": endpoint{},
406 "us-west-1": endpoint{},
407 "us-west-2": endpoint{},
408 },
409 },
410 "cloudsearch": service{
411
412 Endpoints: endpoints{
413 "ap-northeast-1": endpoint{},
414 "ap-northeast-2": endpoint{},
415 "ap-southeast-1": endpoint{},
416 "ap-southeast-2": endpoint{},
417 "eu-central-1": endpoint{},
418 "eu-west-1": endpoint{},
419 "sa-east-1": endpoint{},
420 "us-east-1": endpoint{},
421 "us-west-1": endpoint{},
422 "us-west-2": endpoint{},
423 },
424 },
425 "cloudtrail": service{
426
427 Endpoints: endpoints{
428 "ap-northeast-1": endpoint{},
429 "ap-northeast-2": endpoint{},
430 "ap-south-1": endpoint{},
431 "ap-southeast-1": endpoint{},
432 "ap-southeast-2": endpoint{},
433 "ca-central-1": endpoint{},
434 "eu-central-1": endpoint{},
435 "eu-west-1": endpoint{},
436 "eu-west-2": endpoint{},
437 "sa-east-1": endpoint{},
438 "us-east-1": endpoint{},
439 "us-east-2": endpoint{},
440 "us-west-1": endpoint{},
441 "us-west-2": endpoint{},
442 },
443 },
444 "codebuild": service{
445
446 Endpoints: endpoints{
447 "ap-northeast-1": endpoint{},
448 "ap-southeast-1": endpoint{},
449 "ap-southeast-2": endpoint{},
450 "eu-central-1": endpoint{},
451 "eu-west-1": endpoint{},
452 "us-east-1": endpoint{},
453 "us-east-2": endpoint{},
454 "us-west-2": endpoint{},
455 },
456 },
457 "codecommit": service{
458
459 Endpoints: endpoints{
460 "eu-west-1": endpoint{},
461 "us-east-1": endpoint{},
462 "us-east-2": endpoint{},
463 "us-west-2": endpoint{},
464 },
465 },
466 "codedeploy": service{
467
468 Endpoints: endpoints{
469 "ap-northeast-1": endpoint{},
470 "ap-northeast-2": endpoint{},
471 "ap-south-1": endpoint{},
472 "ap-southeast-1": endpoint{},
473 "ap-southeast-2": endpoint{},
474 "ca-central-1": endpoint{},
475 "eu-central-1": endpoint{},
476 "eu-west-1": endpoint{},
477 "eu-west-2": endpoint{},
478 "sa-east-1": endpoint{},
479 "us-east-1": endpoint{},
480 "us-east-2": endpoint{},
481 "us-west-1": endpoint{},
482 "us-west-2": endpoint{},
483 },
484 },
485 "codepipeline": service{
486
487 Endpoints: endpoints{
488 "ap-northeast-1": endpoint{},
489 "ap-southeast-1": endpoint{},
490 "ap-southeast-2": endpoint{},
491 "eu-central-1": endpoint{},
492 "eu-west-1": endpoint{},
493 "sa-east-1": endpoint{},
494 "us-east-1": endpoint{},
495 "us-east-2": endpoint{},
496 "us-west-2": endpoint{},
497 },
498 },
499 "codestar": service{
500
501 Endpoints: endpoints{
502 "eu-west-1": endpoint{},
503 "us-east-1": endpoint{},
504 "us-east-2": endpoint{},
505 "us-west-2": endpoint{},
506 },
507 },
508 "cognito-identity": service{
509
510 Endpoints: endpoints{
511 "ap-northeast-1": endpoint{},
512 "ap-northeast-2": endpoint{},
513 "ap-south-1": endpoint{},
514 "ap-southeast-2": endpoint{},
515 "eu-central-1": endpoint{},
516 "eu-west-1": endpoint{},
517 "eu-west-2": endpoint{},
518 "us-east-1": endpoint{},
519 "us-east-2": endpoint{},
520 "us-west-2": endpoint{},
521 },
522 },
523 "cognito-idp": service{
524
525 Endpoints: endpoints{
526 "ap-northeast-1": endpoint{},
527 "ap-northeast-2": endpoint{},
528 "ap-south-1": endpoint{},
529 "ap-southeast-2": endpoint{},
530 "eu-central-1": endpoint{},
531 "eu-west-1": endpoint{},
532 "eu-west-2": endpoint{},
533 "us-east-1": endpoint{},
534 "us-east-2": endpoint{},
535 "us-west-2": endpoint{},
536 },
537 },
538 "cognito-sync": service{
539
540 Endpoints: endpoints{
541 "ap-northeast-1": endpoint{},
542 "ap-northeast-2": endpoint{},
543 "ap-south-1": endpoint{},
544 "ap-southeast-2": endpoint{},
545 "eu-central-1": endpoint{},
546 "eu-west-1": endpoint{},
547 "eu-west-2": endpoint{},
548 "us-east-1": endpoint{},
549 "us-east-2": endpoint{},
550 "us-west-2": endpoint{},
551 },
552 },
553 "config": service{
554
555 Endpoints: endpoints{
556 "ap-northeast-1": endpoint{},
557 "ap-northeast-2": endpoint{},
558 "ap-south-1": endpoint{},
559 "ap-southeast-1": endpoint{},
560 "ap-southeast-2": endpoint{},
561 "ca-central-1": endpoint{},
562 "eu-central-1": endpoint{},
563 "eu-west-1": endpoint{},
564 "eu-west-2": endpoint{},
565 "sa-east-1": endpoint{},
566 "us-east-1": endpoint{},
567 "us-east-2": endpoint{},
568 "us-west-1": endpoint{},
569 "us-west-2": endpoint{},
570 },
571 },
572 "cur": service{
573
574 Endpoints: endpoints{
575 "us-east-1": endpoint{},
576 },
577 },
578 "datapipeline": service{
579
580 Endpoints: endpoints{
581 "ap-northeast-1": endpoint{},
582 "ap-southeast-2": endpoint{},
583 "eu-west-1": endpoint{},
584 "us-east-1": endpoint{},
585 "us-west-2": endpoint{},
586 },
587 },
588 "devicefarm": service{
589
590 Endpoints: endpoints{
591 "us-west-2": endpoint{},
592 },
593 },
594 "directconnect": service{
595
596 Endpoints: endpoints{
597 "ap-northeast-1": endpoint{},
598 "ap-northeast-2": endpoint{},
599 "ap-south-1": endpoint{},
600 "ap-southeast-1": endpoint{},
601 "ap-southeast-2": endpoint{},
602 "ca-central-1": endpoint{},
603 "eu-central-1": endpoint{},
604 "eu-west-1": endpoint{},
605 "eu-west-2": endpoint{},
606 "sa-east-1": endpoint{},
607 "us-east-1": endpoint{},
608 "us-east-2": endpoint{},
609 "us-west-1": endpoint{},
610 "us-west-2": endpoint{},
611 },
612 },
613 "discovery": service{
614
615 Endpoints: endpoints{
616 "us-west-2": endpoint{},
617 },
618 },
619 "dms": service{
620
621 Endpoints: endpoints{
622 "ap-northeast-1": endpoint{},
623 "ap-northeast-2": endpoint{},
624 "ap-south-1": endpoint{},
625 "ap-southeast-1": endpoint{},
626 "ap-southeast-2": endpoint{},
627 "ca-central-1": endpoint{},
628 "eu-central-1": endpoint{},
629 "eu-west-1": endpoint{},
630 "eu-west-2": endpoint{},
631 "sa-east-1": endpoint{},
632 "us-east-1": endpoint{},
633 "us-east-2": endpoint{},
634 "us-west-1": endpoint{},
635 "us-west-2": endpoint{},
636 },
637 },
638 "ds": service{
639
640 Endpoints: endpoints{
641 "ap-northeast-1": endpoint{},
642 "ap-southeast-1": endpoint{},
643 "ap-southeast-2": endpoint{},
644 "eu-central-1": endpoint{},
645 "eu-west-1": endpoint{},
646 "us-east-1": endpoint{},
647 "us-west-2": endpoint{},
648 },
649 },
650 "dynamodb": service{
651 Defaults: endpoint{
652 Protocols: []string{"http", "https"},
653 },
654 Endpoints: endpoints{
655 "ap-northeast-1": endpoint{},
656 "ap-northeast-2": endpoint{},
657 "ap-south-1": endpoint{},
658 "ap-southeast-1": endpoint{},
659 "ap-southeast-2": endpoint{},
660 "ca-central-1": endpoint{},
661 "eu-central-1": endpoint{},
662 "eu-west-1": endpoint{},
663 "eu-west-2": endpoint{},
664 "local": endpoint{
665 Hostname: "localhost:8000",
666 Protocols: []string{"http"},
667 CredentialScope: credentialScope{
668 Region: "us-east-1",
669 },
670 },
671 "sa-east-1": endpoint{},
672 "us-east-1": endpoint{},
673 "us-east-2": endpoint{},
674 "us-west-1": endpoint{},
675 "us-west-2": endpoint{},
676 },
677 },
678 "ec2": service{
679 Defaults: endpoint{
680 Protocols: []string{"http", "https"},
681 },
682 Endpoints: endpoints{
683 "ap-northeast-1": endpoint{},
684 "ap-northeast-2": endpoint{},
685 "ap-south-1": endpoint{},
686 "ap-southeast-1": endpoint{},
687 "ap-southeast-2": endpoint{},
688 "ca-central-1": endpoint{},
689 "eu-central-1": endpoint{},
690 "eu-west-1": endpoint{},
691 "eu-west-2": endpoint{},
692 "sa-east-1": endpoint{},
693 "us-east-1": endpoint{},
694 "us-east-2": endpoint{},
695 "us-west-1": endpoint{},
696 "us-west-2": endpoint{},
697 },
698 },
699 "ec2metadata": service{
700 PartitionEndpoint: "aws-global",
701 IsRegionalized: boxedFalse,
702
703 Endpoints: endpoints{
704 "aws-global": endpoint{
705 Hostname: "169.254.169.254/latest",
706 Protocols: []string{"http"},
707 },
708 },
709 },
710 "ecr": service{
711
712 Endpoints: endpoints{
713 "ap-northeast-1": endpoint{},
714 "ap-southeast-1": endpoint{},
715 "ap-southeast-2": endpoint{},
716 "ca-central-1": endpoint{},
717 "eu-central-1": endpoint{},
718 "eu-west-1": endpoint{},
719 "eu-west-2": endpoint{},
720 "us-east-1": endpoint{},
721 "us-east-2": endpoint{},
722 "us-west-1": endpoint{},
723 "us-west-2": endpoint{},
724 },
725 },
726 "ecs": service{
727
728 Endpoints: endpoints{
729 "ap-northeast-1": endpoint{},
730 "ap-southeast-1": endpoint{},
731 "ap-southeast-2": endpoint{},
732 "ca-central-1": endpoint{},
733 "eu-central-1": endpoint{},
734 "eu-west-1": endpoint{},
735 "eu-west-2": endpoint{},
736 "us-east-1": endpoint{},
737 "us-east-2": endpoint{},
738 "us-west-1": endpoint{},
739 "us-west-2": endpoint{},
740 },
741 },
742 "elasticache": service{
743
744 Endpoints: endpoints{
745 "ap-northeast-1": endpoint{},
746 "ap-northeast-2": endpoint{},
747 "ap-south-1": endpoint{},
748 "ap-southeast-1": endpoint{},
749 "ap-southeast-2": endpoint{},
750 "ca-central-1": endpoint{},
751 "eu-central-1": endpoint{},
752 "eu-west-1": endpoint{},
753 "eu-west-2": endpoint{},
754 "sa-east-1": endpoint{},
755 "us-east-1": endpoint{},
756 "us-east-2": endpoint{},
757 "us-west-1": endpoint{},
758 "us-west-2": endpoint{},
759 },
760 },
761 "elasticbeanstalk": service{
762
763 Endpoints: endpoints{
764 "ap-northeast-1": endpoint{},
765 "ap-northeast-2": endpoint{},
766 "ap-south-1": endpoint{},
767 "ap-southeast-1": endpoint{},
768 "ap-southeast-2": endpoint{},
769 "ca-central-1": endpoint{},
770 "eu-central-1": endpoint{},
771 "eu-west-1": endpoint{},
772 "eu-west-2": endpoint{},
773 "sa-east-1": endpoint{},
774 "us-east-1": endpoint{},
775 "us-east-2": endpoint{},
776 "us-west-1": endpoint{},
777 "us-west-2": endpoint{},
778 },
779 },
780 "elasticfilesystem": service{
781
782 Endpoints: endpoints{
783 "ap-southeast-2": endpoint{},
784 "eu-west-1": endpoint{},
785 "us-east-1": endpoint{},
786 "us-east-2": endpoint{},
787 "us-west-2": endpoint{},
788 },
789 },
790 "elasticloadbalancing": service{
791 Defaults: endpoint{
792 Protocols: []string{"http", "https"},
793 },
794 Endpoints: endpoints{
795 "ap-northeast-1": endpoint{},
796 "ap-northeast-2": endpoint{},
797 "ap-south-1": endpoint{},
798 "ap-southeast-1": endpoint{},
799 "ap-southeast-2": endpoint{},
800 "ca-central-1": endpoint{},
801 "eu-central-1": endpoint{},
802 "eu-west-1": endpoint{},
803 "eu-west-2": endpoint{},
804 "sa-east-1": endpoint{},
805 "us-east-1": endpoint{},
806 "us-east-2": endpoint{},
807 "us-west-1": endpoint{},
808 "us-west-2": endpoint{},
809 },
810 },
811 "elasticmapreduce": service{
812 Defaults: endpoint{
813 SSLCommonName: "{region}.{service}.{dnsSuffix}",
814 Protocols: []string{"http", "https"},
815 },
816 Endpoints: endpoints{
817 "ap-northeast-1": endpoint{},
818 "ap-northeast-2": endpoint{},
819 "ap-south-1": endpoint{},
820 "ap-southeast-1": endpoint{},
821 "ap-southeast-2": endpoint{},
822 "ca-central-1": endpoint{},
823 "eu-central-1": endpoint{
824 SSLCommonName: "{service}.{region}.{dnsSuffix}",
825 },
826 "eu-west-1": endpoint{},
827 "eu-west-2": endpoint{},
828 "sa-east-1": endpoint{},
829 "us-east-1": endpoint{
830 SSLCommonName: "{service}.{region}.{dnsSuffix}",
831 },
832 "us-east-2": endpoint{},
833 "us-west-1": endpoint{},
834 "us-west-2": endpoint{},
835 },
836 },
837 "elastictranscoder": service{
838
839 Endpoints: endpoints{
840 "ap-northeast-1": endpoint{},
841 "ap-south-1": endpoint{},
842 "ap-southeast-1": endpoint{},
843 "ap-southeast-2": endpoint{},
844 "eu-west-1": endpoint{},
845 "us-east-1": endpoint{},
846 "us-west-1": endpoint{},
847 "us-west-2": endpoint{},
848 },
849 },
850 "email": service{
851
852 Endpoints: endpoints{
853 "eu-west-1": endpoint{},
854 "us-east-1": endpoint{},
855 "us-west-2": endpoint{},
856 },
857 },
858 "entitlement.marketplace": service{
859 Defaults: endpoint{
860 CredentialScope: credentialScope{
861 Service: "aws-marketplace",
862 },
863 },
864 Endpoints: endpoints{
865 "us-east-1": endpoint{},
866 },
867 },
868 "es": service{
869
870 Endpoints: endpoints{
871 "ap-northeast-1": endpoint{},
872 "ap-northeast-2": endpoint{},
873 "ap-south-1": endpoint{},
874 "ap-southeast-1": endpoint{},
875 "ap-southeast-2": endpoint{},
876 "ca-central-1": endpoint{},
877 "eu-central-1": endpoint{},
878 "eu-west-1": endpoint{},
879 "eu-west-2": endpoint{},
880 "sa-east-1": endpoint{},
881 "us-east-1": endpoint{},
882 "us-east-2": endpoint{},
883 "us-west-1": endpoint{},
884 "us-west-2": endpoint{},
885 },
886 },
887 "events": service{
888
889 Endpoints: endpoints{
890 "ap-northeast-1": endpoint{},
891 "ap-northeast-2": endpoint{},
892 "ap-south-1": endpoint{},
893 "ap-southeast-1": endpoint{},
894 "ap-southeast-2": endpoint{},
895 "ca-central-1": endpoint{},
896 "eu-central-1": endpoint{},
897 "eu-west-1": endpoint{},
898 "eu-west-2": endpoint{},
899 "sa-east-1": endpoint{},
900 "us-east-1": endpoint{},
901 "us-east-2": endpoint{},
902 "us-west-1": endpoint{},
903 "us-west-2": endpoint{},
904 },
905 },
906 "firehose": service{
907
908 Endpoints: endpoints{
909 "eu-west-1": endpoint{},
910 "us-east-1": endpoint{},
911 "us-west-2": endpoint{},
912 },
913 },
914 "gamelift": service{
915
916 Endpoints: endpoints{
917 "ap-northeast-1": endpoint{},
918 "ap-northeast-2": endpoint{},
919 "ap-south-1": endpoint{},
920 "ap-southeast-1": endpoint{},
921 "eu-central-1": endpoint{},
922 "eu-west-1": endpoint{},
923 "sa-east-1": endpoint{},
924 "us-east-1": endpoint{},
925 "us-west-2": endpoint{},
926 },
927 },
928 "glacier": service{
929 Defaults: endpoint{
930 Protocols: []string{"http", "https"},
931 },
932 Endpoints: endpoints{
933 "ap-northeast-1": endpoint{},
934 "ap-northeast-2": endpoint{},
935 "ap-south-1": endpoint{},
936 "ap-southeast-2": endpoint{},
937 "ca-central-1": endpoint{},
938 "eu-central-1": endpoint{},
939 "eu-west-1": endpoint{},
940 "eu-west-2": endpoint{},
941 "us-east-1": endpoint{},
942 "us-east-2": endpoint{},
943 "us-west-1": endpoint{},
944 "us-west-2": endpoint{},
945 },
946 },
947 "health": service{
948
949 Endpoints: endpoints{
950 "us-east-1": endpoint{},
951 },
952 },
953 "iam": service{
954 PartitionEndpoint: "aws-global",
955 IsRegionalized: boxedFalse,
956
957 Endpoints: endpoints{
958 "aws-global": endpoint{
959 Hostname: "iam.amazonaws.com",
960 CredentialScope: credentialScope{
961 Region: "us-east-1",
962 },
963 },
964 },
965 },
966 "importexport": service{
967 PartitionEndpoint: "aws-global",
968 IsRegionalized: boxedFalse,
969
970 Endpoints: endpoints{
971 "aws-global": endpoint{
972 Hostname: "importexport.amazonaws.com",
973 SignatureVersions: []string{"v2", "v4"},
974 CredentialScope: credentialScope{
975 Region: "us-east-1",
976 Service: "IngestionService",
977 },
978 },
979 },
980 },
981 "inspector": service{
982
983 Endpoints: endpoints{
984 "ap-northeast-1": endpoint{},
985 "ap-northeast-2": endpoint{},
986 "ap-south-1": endpoint{},
987 "ap-southeast-2": endpoint{},
988 "eu-west-1": endpoint{},
989 "us-east-1": endpoint{},
990 "us-west-2": endpoint{},
991 },
992 },
993 "iot": service{
994 Defaults: endpoint{
995 CredentialScope: credentialScope{
996 Service: "execute-api",
997 },
998 },
999 Endpoints: endpoints{
1000 "ap-northeast-1": endpoint{},
1001 "ap-northeast-2": endpoint{},
1002 "ap-southeast-1": endpoint{},
1003 "ap-southeast-2": endpoint{},
1004 "eu-central-1": endpoint{},
1005 "eu-west-1": endpoint{},
1006 "eu-west-2": endpoint{},
1007 "us-east-1": endpoint{},
1008 "us-east-2": endpoint{},
1009 "us-west-2": endpoint{},
1010 },
1011 },
1012 "kinesis": service{
1013
1014 Endpoints: endpoints{
1015 "ap-northeast-1": endpoint{},
1016 "ap-northeast-2": endpoint{},
1017 "ap-south-1": endpoint{},
1018 "ap-southeast-1": endpoint{},
1019 "ap-southeast-2": endpoint{},
1020 "ca-central-1": endpoint{},
1021 "eu-central-1": endpoint{},
1022 "eu-west-1": endpoint{},
1023 "eu-west-2": endpoint{},
1024 "sa-east-1": endpoint{},
1025 "us-east-1": endpoint{},
1026 "us-east-2": endpoint{},
1027 "us-west-1": endpoint{},
1028 "us-west-2": endpoint{},
1029 },
1030 },
1031 "kinesisanalytics": service{
1032
1033 Endpoints: endpoints{
1034 "eu-west-1": endpoint{},
1035 "us-east-1": endpoint{},
1036 "us-west-2": endpoint{},
1037 },
1038 },
1039 "kms": service{
1040
1041 Endpoints: endpoints{
1042 "ap-northeast-1": endpoint{},
1043 "ap-northeast-2": endpoint{},
1044 "ap-south-1": endpoint{},
1045 "ap-southeast-1": endpoint{},
1046 "ap-southeast-2": endpoint{},
1047 "ca-central-1": endpoint{},
1048 "eu-central-1": endpoint{},
1049 "eu-west-1": endpoint{},
1050 "eu-west-2": endpoint{},
1051 "sa-east-1": endpoint{},
1052 "us-east-1": endpoint{},
1053 "us-east-2": endpoint{},
1054 "us-west-1": endpoint{},
1055 "us-west-2": endpoint{},
1056 },
1057 },
1058 "lambda": service{
1059
1060 Endpoints: endpoints{
1061 "ap-northeast-1": endpoint{},
1062 "ap-northeast-2": endpoint{},
1063 "ap-south-1": endpoint{},
1064 "ap-southeast-1": endpoint{},
1065 "ap-southeast-2": endpoint{},
1066 "eu-central-1": endpoint{},
1067 "eu-west-1": endpoint{},
1068 "eu-west-2": endpoint{},
1069 "us-east-1": endpoint{},
1070 "us-east-2": endpoint{},
1071 "us-west-1": endpoint{},
1072 "us-west-2": endpoint{},
1073 },
1074 },
1075 "lightsail": service{
1076
1077 Endpoints: endpoints{
1078 "us-east-1": endpoint{},
1079 },
1080 },
1081 "logs": service{
1082
1083 Endpoints: endpoints{
1084 "ap-northeast-1": endpoint{},
1085 "ap-northeast-2": endpoint{},
1086 "ap-south-1": endpoint{},
1087 "ap-southeast-1": endpoint{},
1088 "ap-southeast-2": endpoint{},
1089 "ca-central-1": endpoint{},
1090 "eu-central-1": endpoint{},
1091 "eu-west-1": endpoint{},
1092 "eu-west-2": endpoint{},
1093 "sa-east-1": endpoint{},
1094 "us-east-1": endpoint{},
1095 "us-east-2": endpoint{},
1096 "us-west-1": endpoint{},
1097 "us-west-2": endpoint{},
1098 },
1099 },
1100 "machinelearning": service{
1101
1102 Endpoints: endpoints{
1103 "eu-west-1": endpoint{},
1104 "us-east-1": endpoint{},
1105 },
1106 },
1107 "marketplacecommerceanalytics": service{
1108
1109 Endpoints: endpoints{
1110 "us-east-1": endpoint{},
1111 },
1112 },
1113 "metering.marketplace": service{
1114 Defaults: endpoint{
1115 CredentialScope: credentialScope{
1116 Service: "aws-marketplace",
1117 },
1118 },
1119 Endpoints: endpoints{
1120 "ap-northeast-1": endpoint{},
1121 "ap-northeast-2": endpoint{},
1122 "ap-south-1": endpoint{},
1123 "ap-southeast-1": endpoint{},
1124 "ap-southeast-2": endpoint{},
1125 "ca-central-1": endpoint{},
1126 "eu-central-1": endpoint{},
1127 "eu-west-1": endpoint{},
1128 "eu-west-2": endpoint{},
1129 "sa-east-1": endpoint{},
1130 "us-east-1": endpoint{},
1131 "us-east-2": endpoint{},
1132 "us-west-1": endpoint{},
1133 "us-west-2": endpoint{},
1134 },
1135 },
1136 "mobileanalytics": service{
1137
1138 Endpoints: endpoints{
1139 "us-east-1": endpoint{},
1140 },
1141 },
1142 "models.lex": service{
1143 Defaults: endpoint{
1144 CredentialScope: credentialScope{
1145 Service: "lex",
1146 },
1147 },
1148 Endpoints: endpoints{
1149 "us-east-1": endpoint{},
1150 },
1151 },
1152 "monitoring": service{
1153 Defaults: endpoint{
1154 Protocols: []string{"http", "https"},
1155 },
1156 Endpoints: endpoints{
1157 "ap-northeast-1": endpoint{},
1158 "ap-northeast-2": endpoint{},
1159 "ap-south-1": endpoint{},
1160 "ap-southeast-1": endpoint{},
1161 "ap-southeast-2": endpoint{},
1162 "ca-central-1": endpoint{},
1163 "eu-central-1": endpoint{},
1164 "eu-west-1": endpoint{},
1165 "eu-west-2": endpoint{},
1166 "sa-east-1": endpoint{},
1167 "us-east-1": endpoint{},
1168 "us-east-2": endpoint{},
1169 "us-west-1": endpoint{},
1170 "us-west-2": endpoint{},
1171 },
1172 },
1173 "mturk-requester": service{
1174 IsRegionalized: boxedFalse,
1175
1176 Endpoints: endpoints{
1177 "sandbox": endpoint{
1178 Hostname: "mturk-requester-sandbox.us-east-1.amazonaws.com",
1179 },
1180 "us-east-1": endpoint{},
1181 },
1182 },
1183 "opsworks": service{
1184
1185 Endpoints: endpoints{
1186 "ap-northeast-1": endpoint{},
1187 "ap-northeast-2": endpoint{},
1188 "ap-south-1": endpoint{},
1189 "ap-southeast-1": endpoint{},
1190 "ap-southeast-2": endpoint{},
1191 "eu-central-1": endpoint{},
1192 "eu-west-1": endpoint{},
1193 "eu-west-2": endpoint{},
1194 "sa-east-1": endpoint{},
1195 "us-east-1": endpoint{},
1196 "us-east-2": endpoint{},
1197 "us-west-1": endpoint{},
1198 "us-west-2": endpoint{},
1199 },
1200 },
1201 "opsworks-cm": service{
1202
1203 Endpoints: endpoints{
1204 "eu-west-1": endpoint{},
1205 "us-east-1": endpoint{},
1206 "us-west-2": endpoint{},
1207 },
1208 },
1209 "organizations": service{
1210 PartitionEndpoint: "aws-global",
1211 IsRegionalized: boxedFalse,
1212
1213 Endpoints: endpoints{
1214 "aws-global": endpoint{
1215 Hostname: "organizations.us-east-1.amazonaws.com",
1216 CredentialScope: credentialScope{
1217 Region: "us-east-1",
1218 },
1219 },
1220 },
1221 },
1222 "pinpoint": service{
1223 Defaults: endpoint{
1224 CredentialScope: credentialScope{
1225 Service: "mobiletargeting",
1226 },
1227 },
1228 Endpoints: endpoints{
1229 "us-east-1": endpoint{},
1230 },
1231 },
1232 "polly": service{
1233
1234 Endpoints: endpoints{
1235 "eu-west-1": endpoint{},
1236 "us-east-1": endpoint{},
1237 "us-east-2": endpoint{},
1238 "us-west-2": endpoint{},
1239 },
1240 },
1241 "rds": service{
1242
1243 Endpoints: endpoints{
1244 "ap-northeast-1": endpoint{},
1245 "ap-northeast-2": endpoint{},
1246 "ap-south-1": endpoint{},
1247 "ap-southeast-1": endpoint{},
1248 "ap-southeast-2": endpoint{},
1249 "ca-central-1": endpoint{},
1250 "eu-central-1": endpoint{},
1251 "eu-west-1": endpoint{},
1252 "eu-west-2": endpoint{},
1253 "sa-east-1": endpoint{},
1254 "us-east-1": endpoint{
1255 SSLCommonName: "{service}.{dnsSuffix}",
1256 },
1257 "us-east-2": endpoint{},
1258 "us-west-1": endpoint{},
1259 "us-west-2": endpoint{},
1260 },
1261 },
1262 "redshift": service{
1263
1264 Endpoints: endpoints{
1265 "ap-northeast-1": endpoint{},
1266 "ap-northeast-2": endpoint{},
1267 "ap-south-1": endpoint{},
1268 "ap-southeast-1": endpoint{},
1269 "ap-southeast-2": endpoint{},
1270 "ca-central-1": endpoint{},
1271 "eu-central-1": endpoint{},
1272 "eu-west-1": endpoint{},
1273 "eu-west-2": endpoint{},
1274 "sa-east-1": endpoint{},
1275 "us-east-1": endpoint{},
1276 "us-east-2": endpoint{},
1277 "us-west-1": endpoint{},
1278 "us-west-2": endpoint{},
1279 },
1280 },
1281 "rekognition": service{
1282
1283 Endpoints: endpoints{
1284 "eu-west-1": endpoint{},
1285 "us-east-1": endpoint{},
1286 "us-west-2": endpoint{},
1287 },
1288 },
1289 "route53": service{
1290 PartitionEndpoint: "aws-global",
1291 IsRegionalized: boxedFalse,
1292
1293 Endpoints: endpoints{
1294 "aws-global": endpoint{
1295 Hostname: "route53.amazonaws.com",
1296 CredentialScope: credentialScope{
1297 Region: "us-east-1",
1298 },
1299 },
1300 },
1301 },
1302 "route53domains": service{
1303
1304 Endpoints: endpoints{
1305 "us-east-1": endpoint{},
1306 },
1307 },
1308 "runtime.lex": service{
1309 Defaults: endpoint{
1310 CredentialScope: credentialScope{
1311 Service: "lex",
1312 },
1313 },
1314 Endpoints: endpoints{
1315 "us-east-1": endpoint{},
1316 },
1317 },
1318 "s3": service{
1319 PartitionEndpoint: "us-east-1",
1320 IsRegionalized: boxedTrue,
1321 Defaults: endpoint{
1322 Protocols: []string{"http", "https"},
1323 SignatureVersions: []string{"s3v4"},
1324
1325 HasDualStack: boxedTrue,
1326 DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}",
1327 },
1328 Endpoints: endpoints{
1329 "ap-northeast-1": endpoint{
1330 Hostname: "s3-ap-northeast-1.amazonaws.com",
1331 SignatureVersions: []string{"s3", "s3v4"},
1332 },
1333 "ap-northeast-2": endpoint{},
1334 "ap-south-1": endpoint{},
1335 "ap-southeast-1": endpoint{
1336 Hostname: "s3-ap-southeast-1.amazonaws.com",
1337 SignatureVersions: []string{"s3", "s3v4"},
1338 },
1339 "ap-southeast-2": endpoint{
1340 Hostname: "s3-ap-southeast-2.amazonaws.com",
1341 SignatureVersions: []string{"s3", "s3v4"},
1342 },
1343 "ca-central-1": endpoint{},
1344 "eu-central-1": endpoint{},
1345 "eu-west-1": endpoint{
1346 Hostname: "s3-eu-west-1.amazonaws.com",
1347 SignatureVersions: []string{"s3", "s3v4"},
1348 },
1349 "eu-west-2": endpoint{},
1350 "s3-external-1": endpoint{
1351 Hostname: "s3-external-1.amazonaws.com",
1352 SignatureVersions: []string{"s3", "s3v4"},
1353 CredentialScope: credentialScope{
1354 Region: "us-east-1",
1355 },
1356 },
1357 "sa-east-1": endpoint{
1358 Hostname: "s3-sa-east-1.amazonaws.com",
1359 SignatureVersions: []string{"s3", "s3v4"},
1360 },
1361 "us-east-1": endpoint{
1362 Hostname: "s3.amazonaws.com",
1363 SignatureVersions: []string{"s3", "s3v4"},
1364 },
1365 "us-east-2": endpoint{},
1366 "us-west-1": endpoint{
1367 Hostname: "s3-us-west-1.amazonaws.com",
1368 SignatureVersions: []string{"s3", "s3v4"},
1369 },
1370 "us-west-2": endpoint{
1371 Hostname: "s3-us-west-2.amazonaws.com",
1372 SignatureVersions: []string{"s3", "s3v4"},
1373 },
1374 },
1375 },
1376 "sdb": service{
1377 Defaults: endpoint{
1378 Protocols: []string{"http", "https"},
1379 SignatureVersions: []string{"v2"},
1380 },
1381 Endpoints: endpoints{
1382 "ap-northeast-1": endpoint{},
1383 "ap-southeast-1": endpoint{},
1384 "ap-southeast-2": endpoint{},
1385 "eu-west-1": endpoint{},
1386 "sa-east-1": endpoint{},
1387 "us-east-1": endpoint{
1388 Hostname: "sdb.amazonaws.com",
1389 },
1390 "us-west-1": endpoint{},
1391 "us-west-2": endpoint{},
1392 },
1393 },
1394 "servicecatalog": service{
1395
1396 Endpoints: endpoints{
1397 "ap-northeast-1": endpoint{},
1398 "ap-southeast-1": endpoint{},
1399 "ap-southeast-2": endpoint{},
1400 "ca-central-1": endpoint{},
1401 "eu-central-1": endpoint{},
1402 "eu-west-1": endpoint{},
1403 "eu-west-2": endpoint{},
1404 "us-east-1": endpoint{},
1405 "us-east-2": endpoint{},
1406 "us-west-2": endpoint{},
1407 },
1408 },
1409 "shield": service{
1410 IsRegionalized: boxedFalse,
1411 Defaults: endpoint{
1412 SSLCommonName: "Shield.us-east-1.amazonaws.com",
1413 Protocols: []string{"https"},
1414 },
1415 Endpoints: endpoints{
1416 "us-east-1": endpoint{},
1417 },
1418 },
1419 "sms": service{
1420
1421 Endpoints: endpoints{
1422 "ap-southeast-2": endpoint{},
1423 "eu-west-1": endpoint{},
1424 "us-east-1": endpoint{},
1425 },
1426 },
1427 "snowball": service{
1428
1429 Endpoints: endpoints{
1430 "ap-south-1": endpoint{},
1431 "ap-southeast-2": endpoint{},
1432 "eu-central-1": endpoint{},
1433 "eu-west-1": endpoint{},
1434 "eu-west-2": endpoint{},
1435 "us-east-1": endpoint{},
1436 "us-east-2": endpoint{},
1437 "us-west-1": endpoint{},
1438 "us-west-2": endpoint{},
1439 },
1440 },
1441 "sns": service{
1442 Defaults: endpoint{
1443 Protocols: []string{"http", "https"},
1444 },
1445 Endpoints: endpoints{
1446 "ap-northeast-1": endpoint{},
1447 "ap-northeast-2": endpoint{},
1448 "ap-south-1": endpoint{},
1449 "ap-southeast-1": endpoint{},
1450 "ap-southeast-2": endpoint{},
1451 "ca-central-1": endpoint{},
1452 "eu-central-1": endpoint{},
1453 "eu-west-1": endpoint{},
1454 "eu-west-2": endpoint{},
1455 "sa-east-1": endpoint{},
1456 "us-east-1": endpoint{},
1457 "us-east-2": endpoint{},
1458 "us-west-1": endpoint{},
1459 "us-west-2": endpoint{},
1460 },
1461 },
1462 "sqs": service{
1463 Defaults: endpoint{
1464 SSLCommonName: "{region}.queue.{dnsSuffix}",
1465 Protocols: []string{"http", "https"},
1466 },
1467 Endpoints: endpoints{
1468 "ap-northeast-1": endpoint{},
1469 "ap-northeast-2": endpoint{},
1470 "ap-south-1": endpoint{},
1471 "ap-southeast-1": endpoint{},
1472 "ap-southeast-2": endpoint{},
1473 "ca-central-1": endpoint{},
1474 "eu-central-1": endpoint{},
1475 "eu-west-1": endpoint{},
1476 "eu-west-2": endpoint{},
1477 "sa-east-1": endpoint{},
1478 "us-east-1": endpoint{
1479 SSLCommonName: "queue.{dnsSuffix}",
1480 },
1481 "us-east-2": endpoint{},
1482 "us-west-1": endpoint{},
1483 "us-west-2": endpoint{},
1484 },
1485 },
1486 "ssm": service{
1487
1488 Endpoints: endpoints{
1489 "ap-northeast-1": endpoint{},
1490 "ap-northeast-2": endpoint{},
1491 "ap-southeast-1": endpoint{},
1492 "ap-southeast-2": endpoint{},
1493 "eu-central-1": endpoint{},
1494 "eu-west-1": endpoint{},
1495 "sa-east-1": endpoint{},
1496 "us-east-1": endpoint{},
1497 "us-east-2": endpoint{},
1498 "us-west-1": endpoint{},
1499 "us-west-2": endpoint{},
1500 },
1501 },
1502 "states": service{
1503
1504 Endpoints: endpoints{
1505 "ap-northeast-1": endpoint{},
1506 "eu-central-1": endpoint{},
1507 "eu-west-1": endpoint{},
1508 "us-east-1": endpoint{},
1509 "us-east-2": endpoint{},
1510 "us-west-2": endpoint{},
1511 },
1512 },
1513 "storagegateway": service{
1514
1515 Endpoints: endpoints{
1516 "ap-northeast-1": endpoint{},
1517 "ap-northeast-2": endpoint{},
1518 "ap-south-1": endpoint{},
1519 "ap-southeast-1": endpoint{},
1520 "ap-southeast-2": endpoint{},
1521 "ca-central-1": endpoint{},
1522 "eu-central-1": endpoint{},
1523 "eu-west-1": endpoint{},
1524 "eu-west-2": endpoint{},
1525 "sa-east-1": endpoint{},
1526 "us-east-1": endpoint{},
1527 "us-east-2": endpoint{},
1528 "us-west-1": endpoint{},
1529 "us-west-2": endpoint{},
1530 },
1531 },
1532 "streams.dynamodb": service{
1533 Defaults: endpoint{
 1534 Protocols: []string{"http", "https"},
1535 CredentialScope: credentialScope{
1536 Service: "dynamodb",
1537 },
1538 },
1539 Endpoints: endpoints{
1540 "ap-northeast-1": endpoint{},
1541 "ap-northeast-2": endpoint{},
1542 "ap-south-1": endpoint{},
1543 "ap-southeast-1": endpoint{},
1544 "ap-southeast-2": endpoint{},
1545 "ca-central-1": endpoint{},
1546 "eu-central-1": endpoint{},
1547 "eu-west-1": endpoint{},
1548 "eu-west-2": endpoint{},
1549 "local": endpoint{
1550 Hostname: "localhost:8000",
1551 Protocols: []string{"http"},
1552 CredentialScope: credentialScope{
1553 Region: "us-east-1",
1554 },
1555 },
1556 "sa-east-1": endpoint{},
1557 "us-east-1": endpoint{},
1558 "us-east-2": endpoint{},
1559 "us-west-1": endpoint{},
1560 "us-west-2": endpoint{},
1561 },
1562 },
1563 "sts": service{
1564 PartitionEndpoint: "aws-global",
1565 Defaults: endpoint{
1566 Hostname: "sts.amazonaws.com",
1567 CredentialScope: credentialScope{
1568 Region: "us-east-1",
1569 },
1570 },
1571 Endpoints: endpoints{
1572 "ap-northeast-1": endpoint{},
1573 "ap-northeast-2": endpoint{
1574 Hostname: "sts.ap-northeast-2.amazonaws.com",
1575 CredentialScope: credentialScope{
1576 Region: "ap-northeast-2",
1577 },
1578 },
1579 "ap-south-1": endpoint{},
1580 "ap-southeast-1": endpoint{},
1581 "ap-southeast-2": endpoint{},
1582 "aws-global": endpoint{},
1583 "ca-central-1": endpoint{},
1584 "eu-central-1": endpoint{},
1585 "eu-west-1": endpoint{},
1586 "eu-west-2": endpoint{},
1587 "sa-east-1": endpoint{},
1588 "us-east-1": endpoint{},
1589 "us-east-2": endpoint{},
1590 "us-west-1": endpoint{},
1591 "us-west-2": endpoint{},
1592 },
1593 },
1594 "support": service{
1595
1596 Endpoints: endpoints{
1597 "us-east-1": endpoint{},
1598 },
1599 },
1600 "swf": service{
1601
1602 Endpoints: endpoints{
1603 "ap-northeast-1": endpoint{},
1604 "ap-northeast-2": endpoint{},
1605 "ap-south-1": endpoint{},
1606 "ap-southeast-1": endpoint{},
1607 "ap-southeast-2": endpoint{},
1608 "ca-central-1": endpoint{},
1609 "eu-central-1": endpoint{},
1610 "eu-west-1": endpoint{},
1611 "eu-west-2": endpoint{},
1612 "sa-east-1": endpoint{},
1613 "us-east-1": endpoint{},
1614 "us-east-2": endpoint{},
1615 "us-west-1": endpoint{},
1616 "us-west-2": endpoint{},
1617 },
1618 },
1619 "tagging": service{
1620
1621 Endpoints: endpoints{
1622 "ap-northeast-1": endpoint{},
1623 "ap-northeast-2": endpoint{},
1624 "ap-south-1": endpoint{},
1625 "ap-southeast-1": endpoint{},
1626 "ap-southeast-2": endpoint{},
1627 "ca-central-1": endpoint{},
1628 "eu-central-1": endpoint{},
1629 "eu-west-1": endpoint{},
1630 "eu-west-2": endpoint{},
1631 "sa-east-1": endpoint{},
1632 "us-east-1": endpoint{},
1633 "us-east-2": endpoint{},
1634 "us-west-1": endpoint{},
1635 "us-west-2": endpoint{},
1636 },
1637 },
1638 "waf": service{
1639 PartitionEndpoint: "aws-global",
1640 IsRegionalized: boxedFalse,
1641
1642 Endpoints: endpoints{
1643 "aws-global": endpoint{
1644 Hostname: "waf.amazonaws.com",
1645 CredentialScope: credentialScope{
1646 Region: "us-east-1",
1647 },
1648 },
1649 },
1650 },
1651 "waf-regional": service{
1652
1653 Endpoints: endpoints{
1654 "ap-northeast-1": endpoint{},
1655 "eu-west-1": endpoint{},
1656 "us-east-1": endpoint{},
1657 "us-west-2": endpoint{},
1658 },
1659 },
1660 "workdocs": service{
1661
1662 Endpoints: endpoints{
1663 "ap-northeast-1": endpoint{},
1664 "ap-southeast-1": endpoint{},
1665 "ap-southeast-2": endpoint{},
1666 "eu-west-1": endpoint{},
1667 "us-east-1": endpoint{},
1668 "us-west-2": endpoint{},
1669 },
1670 },
1671 "workspaces": service{
1672
1673 Endpoints: endpoints{
1674 "ap-northeast-1": endpoint{},
1675 "ap-southeast-1": endpoint{},
1676 "ap-southeast-2": endpoint{},
1677 "eu-central-1": endpoint{},
1678 "eu-west-1": endpoint{},
1679 "us-east-1": endpoint{},
1680 "us-west-2": endpoint{},
1681 },
1682 },
1683 "xray": service{
1684
1685 Endpoints: endpoints{
1686 "ap-northeast-1": endpoint{},
1687 "ap-northeast-2": endpoint{},
1688 "ap-south-1": endpoint{},
1689 "ap-southeast-1": endpoint{},
1690 "ap-southeast-2": endpoint{},
1691 "eu-central-1": endpoint{},
1692 "eu-west-1": endpoint{},
1693 "sa-east-1": endpoint{},
1694 "us-east-1": endpoint{},
1695 "us-east-2": endpoint{},
1696 "us-west-1": endpoint{},
1697 "us-west-2": endpoint{},
1698 },
1699 },
1700 },
1701}
1702
1703// AwsCnPartition returns the Resolver for AWS China.
1704func AwsCnPartition() Partition {
1705 return awscnPartition.Partition()
1706}
1707
1708var awscnPartition = partition{
1709 ID: "aws-cn",
1710 Name: "AWS China",
1711 DNSSuffix: "amazonaws.com.cn",
1712 RegionRegex: regionRegex{
1713 Regexp: func() *regexp.Regexp {
1714 reg, _ := regexp.Compile("^cn\\-\\w+\\-\\d+$")
1715 return reg
1716 }(),
1717 },
1718 Defaults: endpoint{
1719 Hostname: "{service}.{region}.{dnsSuffix}",
1720 Protocols: []string{"https"},
1721 SignatureVersions: []string{"v4"},
1722 },
1723 Regions: regions{
1724 "cn-north-1": region{
1725 Description: "China (Beijing)",
1726 },
1727 },
1728 Services: services{
1729 "autoscaling": service{
1730 Defaults: endpoint{
1731 Protocols: []string{"http", "https"},
1732 },
1733 Endpoints: endpoints{
1734 "cn-north-1": endpoint{},
1735 },
1736 },
1737 "cloudformation": service{
1738
1739 Endpoints: endpoints{
1740 "cn-north-1": endpoint{},
1741 },
1742 },
1743 "cloudtrail": service{
1744
1745 Endpoints: endpoints{
1746 "cn-north-1": endpoint{},
1747 },
1748 },
1749 "codedeploy": service{
1750
1751 Endpoints: endpoints{
1752 "cn-north-1": endpoint{},
1753 },
1754 },
1755 "config": service{
1756
1757 Endpoints: endpoints{
1758 "cn-north-1": endpoint{},
1759 },
1760 },
1761 "directconnect": service{
1762
1763 Endpoints: endpoints{
1764 "cn-north-1": endpoint{},
1765 },
1766 },
1767 "dynamodb": service{
1768 Defaults: endpoint{
1769 Protocols: []string{"http", "https"},
1770 },
1771 Endpoints: endpoints{
1772 "cn-north-1": endpoint{},
1773 },
1774 },
1775 "ec2": service{
1776 Defaults: endpoint{
1777 Protocols: []string{"http", "https"},
1778 },
1779 Endpoints: endpoints{
1780 "cn-north-1": endpoint{},
1781 },
1782 },
1783 "ec2metadata": service{
1784 PartitionEndpoint: "aws-global",
1785 IsRegionalized: boxedFalse,
1786
1787 Endpoints: endpoints{
1788 "aws-global": endpoint{
1789 Hostname: "169.254.169.254/latest",
1790 Protocols: []string{"http"},
1791 },
1792 },
1793 },
1794 "elasticache": service{
1795
1796 Endpoints: endpoints{
1797 "cn-north-1": endpoint{},
1798 },
1799 },
1800 "elasticbeanstalk": service{
1801
1802 Endpoints: endpoints{
1803 "cn-north-1": endpoint{},
1804 },
1805 },
1806 "elasticloadbalancing": service{
1807 Defaults: endpoint{
1808 Protocols: []string{"http", "https"},
1809 },
1810 Endpoints: endpoints{
1811 "cn-north-1": endpoint{},
1812 },
1813 },
1814 "elasticmapreduce": service{
1815 Defaults: endpoint{
1816 Protocols: []string{"http", "https"},
1817 },
1818 Endpoints: endpoints{
1819 "cn-north-1": endpoint{},
1820 },
1821 },
1822 "events": service{
1823
1824 Endpoints: endpoints{
1825 "cn-north-1": endpoint{},
1826 },
1827 },
1828 "glacier": service{
1829 Defaults: endpoint{
1830 Protocols: []string{"http", "https"},
1831 },
1832 Endpoints: endpoints{
1833 "cn-north-1": endpoint{},
1834 },
1835 },
1836 "iam": service{
1837 PartitionEndpoint: "aws-cn-global",
1838 IsRegionalized: boxedFalse,
1839
1840 Endpoints: endpoints{
1841 "aws-cn-global": endpoint{
1842 Hostname: "iam.cn-north-1.amazonaws.com.cn",
1843 CredentialScope: credentialScope{
1844 Region: "cn-north-1",
1845 },
1846 },
1847 },
1848 },
1849 "kinesis": service{
1850
1851 Endpoints: endpoints{
1852 "cn-north-1": endpoint{},
1853 },
1854 },
1855 "logs": service{
1856
1857 Endpoints: endpoints{
1858 "cn-north-1": endpoint{},
1859 },
1860 },
1861 "monitoring": service{
1862 Defaults: endpoint{
1863 Protocols: []string{"http", "https"},
1864 },
1865 Endpoints: endpoints{
1866 "cn-north-1": endpoint{},
1867 },
1868 },
1869 "rds": service{
1870
1871 Endpoints: endpoints{
1872 "cn-north-1": endpoint{},
1873 },
1874 },
1875 "redshift": service{
1876
1877 Endpoints: endpoints{
1878 "cn-north-1": endpoint{},
1879 },
1880 },
1881 "s3": service{
1882 Defaults: endpoint{
1883 Protocols: []string{"http", "https"},
1884 SignatureVersions: []string{"s3v4"},
1885 },
1886 Endpoints: endpoints{
1887 "cn-north-1": endpoint{},
1888 },
1889 },
1890 "sns": service{
1891 Defaults: endpoint{
1892 Protocols: []string{"http", "https"},
1893 },
1894 Endpoints: endpoints{
1895 "cn-north-1": endpoint{},
1896 },
1897 },
1898 "sqs": service{
1899 Defaults: endpoint{
1900 SSLCommonName: "{region}.queue.{dnsSuffix}",
1901 Protocols: []string{"http", "https"},
1902 },
1903 Endpoints: endpoints{
1904 "cn-north-1": endpoint{},
1905 },
1906 },
1907 "storagegateway": service{
1908
1909 Endpoints: endpoints{
1910 "cn-north-1": endpoint{},
1911 },
1912 },
1913 "streams.dynamodb": service{
1914 Defaults: endpoint{
 1915 Protocols: []string{"http", "https"},
1916 CredentialScope: credentialScope{
1917 Service: "dynamodb",
1918 },
1919 },
1920 Endpoints: endpoints{
1921 "cn-north-1": endpoint{},
1922 },
1923 },
1924 "sts": service{
1925
1926 Endpoints: endpoints{
1927 "cn-north-1": endpoint{},
1928 },
1929 },
1930 "swf": service{
1931
1932 Endpoints: endpoints{
1933 "cn-north-1": endpoint{},
1934 },
1935 },
1936 "tagging": service{
1937
1938 Endpoints: endpoints{
1939 "cn-north-1": endpoint{},
1940 },
1941 },
1942 },
1943}
1944
1945// AwsUsGovPartition returns the Resolver for AWS GovCloud (US).
1946func AwsUsGovPartition() Partition {
1947 return awsusgovPartition.Partition()
1948}
1949
1950var awsusgovPartition = partition{
1951 ID: "aws-us-gov",
1952 Name: "AWS GovCloud (US)",
1953 DNSSuffix: "amazonaws.com",
1954 RegionRegex: regionRegex{
1955 Regexp: func() *regexp.Regexp {
1956 reg, _ := regexp.Compile("^us\\-gov\\-\\w+\\-\\d+$")
1957 return reg
1958 }(),
1959 },
1960 Defaults: endpoint{
1961 Hostname: "{service}.{region}.{dnsSuffix}",
1962 Protocols: []string{"https"},
1963 SignatureVersions: []string{"v4"},
1964 },
1965 Regions: regions{
1966 "us-gov-west-1": region{
1967 Description: "AWS GovCloud (US)",
1968 },
1969 },
1970 Services: services{
1971 "autoscaling": service{
1972
1973 Endpoints: endpoints{
1974 "us-gov-west-1": endpoint{
1975 Protocols: []string{"http", "https"},
1976 },
1977 },
1978 },
1979 "cloudformation": service{
1980
1981 Endpoints: endpoints{
1982 "us-gov-west-1": endpoint{},
1983 },
1984 },
1985 "cloudhsm": service{
1986
1987 Endpoints: endpoints{
1988 "us-gov-west-1": endpoint{},
1989 },
1990 },
1991 "cloudtrail": service{
1992
1993 Endpoints: endpoints{
1994 "us-gov-west-1": endpoint{},
1995 },
1996 },
1997 "config": service{
1998
1999 Endpoints: endpoints{
2000 "us-gov-west-1": endpoint{},
2001 },
2002 },
2003 "directconnect": service{
2004
2005 Endpoints: endpoints{
2006 "us-gov-west-1": endpoint{},
2007 },
2008 },
2009 "dynamodb": service{
2010
2011 Endpoints: endpoints{
2012 "us-gov-west-1": endpoint{},
2013 },
2014 },
2015 "ec2": service{
2016
2017 Endpoints: endpoints{
2018 "us-gov-west-1": endpoint{},
2019 },
2020 },
2021 "ec2metadata": service{
2022 PartitionEndpoint: "aws-global",
2023 IsRegionalized: boxedFalse,
2024
2025 Endpoints: endpoints{
2026 "aws-global": endpoint{
2027 Hostname: "169.254.169.254/latest",
2028 Protocols: []string{"http"},
2029 },
2030 },
2031 },
2032 "elasticache": service{
2033
2034 Endpoints: endpoints{
2035 "us-gov-west-1": endpoint{},
2036 },
2037 },
2038 "elasticloadbalancing": service{
2039
2040 Endpoints: endpoints{
2041 "us-gov-west-1": endpoint{
2042 Protocols: []string{"http", "https"},
2043 },
2044 },
2045 },
2046 "elasticmapreduce": service{
2047
2048 Endpoints: endpoints{
2049 "us-gov-west-1": endpoint{
2050 Protocols: []string{"http", "https"},
2051 },
2052 },
2053 },
2054 "glacier": service{
2055
2056 Endpoints: endpoints{
2057 "us-gov-west-1": endpoint{
2058 Protocols: []string{"http", "https"},
2059 },
2060 },
2061 },
2062 "iam": service{
2063 PartitionEndpoint: "aws-us-gov-global",
2064 IsRegionalized: boxedFalse,
2065
2066 Endpoints: endpoints{
2067 "aws-us-gov-global": endpoint{
2068 Hostname: "iam.us-gov.amazonaws.com",
2069 CredentialScope: credentialScope{
2070 Region: "us-gov-west-1",
2071 },
2072 },
2073 },
2074 },
2075 "kinesis": service{
2076
2077 Endpoints: endpoints{
2078 "us-gov-west-1": endpoint{},
2079 },
2080 },
2081 "kms": service{
2082
2083 Endpoints: endpoints{
2084 "us-gov-west-1": endpoint{},
2085 },
2086 },
2087 "logs": service{
2088
2089 Endpoints: endpoints{
2090 "us-gov-west-1": endpoint{},
2091 },
2092 },
2093 "monitoring": service{
2094
2095 Endpoints: endpoints{
2096 "us-gov-west-1": endpoint{},
2097 },
2098 },
2099 "rds": service{
2100
2101 Endpoints: endpoints{
2102 "us-gov-west-1": endpoint{},
2103 },
2104 },
2105 "redshift": service{
2106
2107 Endpoints: endpoints{
2108 "us-gov-west-1": endpoint{},
2109 },
2110 },
2111 "s3": service{
2112 Defaults: endpoint{
2113 SignatureVersions: []string{"s3", "s3v4"},
2114 },
2115 Endpoints: endpoints{
2116 "fips-us-gov-west-1": endpoint{
2117 Hostname: "s3-fips-us-gov-west-1.amazonaws.com",
2118 CredentialScope: credentialScope{
2119 Region: "us-gov-west-1",
2120 },
2121 },
2122 "us-gov-west-1": endpoint{
2123 Hostname: "s3-us-gov-west-1.amazonaws.com",
2124 Protocols: []string{"http", "https"},
2125 },
2126 },
2127 },
2128 "snowball": service{
2129
2130 Endpoints: endpoints{
2131 "us-gov-west-1": endpoint{},
2132 },
2133 },
2134 "sns": service{
2135
2136 Endpoints: endpoints{
2137 "us-gov-west-1": endpoint{
2138 Protocols: []string{"http", "https"},
2139 },
2140 },
2141 },
2142 "sqs": service{
2143
2144 Endpoints: endpoints{
2145 "us-gov-west-1": endpoint{
2146 SSLCommonName: "{region}.queue.{dnsSuffix}",
2147 Protocols: []string{"http", "https"},
2148 },
2149 },
2150 },
2151 "streams.dynamodb": service{
2152 Defaults: endpoint{
2153 CredentialScope: credentialScope{
2154 Service: "dynamodb",
2155 },
2156 },
2157 Endpoints: endpoints{
2158 "us-gov-west-1": endpoint{},
2159 },
2160 },
2161 "sts": service{
2162
2163 Endpoints: endpoints{
2164 "us-gov-west-1": endpoint{},
2165 },
2166 },
2167 "swf": service{
2168
2169 Endpoints: endpoints{
2170 "us-gov-west-1": endpoint{},
2171 },
2172 },
2173 },
2174}
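
The three partition variables in this file are consumed through the resolver API added in endpoints.go below. As a minimal sketch of how the generated data resolves to a concrete URL (assuming an AwsPartition accessor for the standard partition, analogous to the AwsCnPartition and AwsUsGovPartition functions above):

	package main

	import (
		"fmt"

		"github.com/aws/aws-sdk-go/aws/endpoints"
	)

	func main() {
		// Resolve SQS in us-east-1 from the generated aws partition data. The
		// partition default hostname template "{service}.{region}.{dnsSuffix}"
		// expands to sqs.us-east-1.amazonaws.com over HTTPS.
		re, err := endpoints.AwsPartition().EndpointFor("sqs", "us-east-1")
		if err != nil {
			panic(err)
		}
		fmt.Println(re.URL)           // https://sqs.us-east-1.amazonaws.com
		fmt.Println(re.SigningRegion) // us-east-1
	}
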
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go
new file mode 100644
index 0000000..a0e9bc4
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go
@@ -0,0 +1,66 @@
1// Package endpoints provides the types and functionality for defining regions
2// and endpoints, as well as querying those definitions.
3//
4// The SDK's Regions and Endpoints metadata is code generated into the endpoints
5// package, and is accessible via the DefaultResolver function. This function
 6// returns an endpoint Resolver that will search the metadata and build an
 7// associated endpoint if one is found. The default resolver will search all
 8// partitions known by the SDK, e.g. AWS Standard (aws), AWS China (aws-cn),
 9// and AWS GovCloud (US) (aws-us-gov).
 10//
11//
12// Enumerating Regions and Endpoint Metadata
13//
 14// Casting the Resolver returned by DefaultResolver to an EnumPartitions interface
15// will allow you to get access to the list of underlying Partitions with the
16// Partitions method. This is helpful if you want to limit the SDK's endpoint
17// resolving to a single partition, or enumerate regions, services, and endpoints
18// in the partition.
19//
20// resolver := endpoints.DefaultResolver()
21// partitions := resolver.(endpoints.EnumPartitions).Partitions()
22//
23// for _, p := range partitions {
24// fmt.Println("Regions for", p.Name)
 25// for id := range p.Regions() {
26// fmt.Println("*", id)
27// }
28//
29// fmt.Println("Services for", p.Name)
 30// for id := range p.Services() {
31// fmt.Println("*", id)
32// }
33// }
34//
35// Using Custom Endpoints
36//
 37// The endpoints package also gives you the ability to use your own logic for how
38// endpoints are resolved. This is a great way to define a custom endpoint
39// for select services, without passing that logic down through your code.
40//
41// If a type implements the Resolver interface it can be used to resolve
 42// endpoints. To use this with the SDK's Session and Config, set the value
 43// of the type to the EndpointResolver field of aws.Config when initializing
 44// the Session or service client.
45//
 46// In addition, ResolverFunc is a wrapper for a func matching the signature
47// of Resolver.EndpointFor, converting it to a type that satisfies the
48// Resolver interface.
49//
50//
51// myCustomResolver := func(service, region string, optFns ...func(*endpoints.Options)) (endpoints.ResolvedEndpoint, error) {
52// if service == endpoints.S3ServiceID {
53// return endpoints.ResolvedEndpoint{
54// URL: "s3.custom.endpoint.com",
55// SigningRegion: "custom-signing-region",
56// }, nil
57// }
58//
59// return endpoints.DefaultResolver().EndpointFor(service, region, optFns...)
60// }
61//
62// sess := session.Must(session.NewSession(&aws.Config{
63// Region: aws.String("us-west-2"),
64// EndpointResolver: endpoints.ResolverFunc(myCustomResolver),
65// }))
66package endpoints
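
As a complement to the examples above, a minimal sketch of resolving through the default resolver with a functional option (the option helpers are defined in endpoints.go below; S3ServiceID is the generated service identifier used in the custom-resolver example):

	// StrictMatchingOption makes resolution fail for a region the SDK does
	// not know, instead of synthesizing an endpoint from the hostname template.
	resolver := endpoints.DefaultResolver()
	ep, err := resolver.EndpointFor(
		endpoints.S3ServiceID, "us-west-2",
		endpoints.StrictMatchingOption,
	)
	if err != nil {
		// us-west-2 is a known region, so this branch is not taken here.
		panic(err)
	}
	fmt.Println(ep.URL) // https://s3-us-west-2.amazonaws.com
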
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go
new file mode 100644
index 0000000..9c3eedb
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go
@@ -0,0 +1,439 @@
1package endpoints
2
3import (
4 "fmt"
5 "regexp"
6
7 "github.com/aws/aws-sdk-go/aws/awserr"
8)
9
10// Options provide the configuration needed to direct how the
11// endpoints will be resolved.
12type Options struct {
 13 // DisableSSL forces the endpoint to be resolved as HTTP
 14 // instead of HTTPS if the service supports it.
15 DisableSSL bool
16
17 // Sets the resolver to resolve the endpoint as a dualstack endpoint
18 // for the service. If dualstack support for a service is not known and
 19 // StrictMatching is not enabled, a dualstack endpoint for the service will
20 // be returned. This endpoint may not be valid. If StrictMatching is
21 // enabled only services that are known to support dualstack will return
22 // dualstack endpoints.
23 UseDualStack bool
24
 25 // Enables strict matching of services and regions when resolving endpoints.
26 // If the partition doesn't enumerate the exact service and region an
27 // error will be returned. This option will prevent returning endpoints
28 // that look valid, but may not resolve to any real endpoint.
29 StrictMatching bool
30
31 // Enables resolving a service endpoint based on the region provided if the
32 // service does not exist. The service endpoint ID will be used as the service
33 // domain name prefix. By default the endpoint resolver requires the service
34 // to be known when resolving endpoints.
35 //
 36 // If resolving an endpoint on the partition list, the provided region will
 37 // be used to determine which partition's domain name pattern to combine the
 38 // service endpoint ID with. If both the service and region are unknown when
 39 // resolving the endpoint on the partition list, an UnknownEndpointError will be returned.
 40 //
 41 // If resolving an endpoint on a partition-specific resolver, that partition's
42 // domain name pattern will be used with the service endpoint ID. If both
43 // region and service do not exist when resolving an endpoint on a specific
44 // partition the partition's domain pattern will be used to combine the
45 // endpoint and region together.
46 //
47 // This option is ignored if StrictMatching is enabled.
48 ResolveUnknownService bool
49}
50
51// Set combines all of the option functions together.
52func (o *Options) Set(optFns ...func(*Options)) {
53 for _, fn := range optFns {
54 fn(o)
55 }
56}
57
58// DisableSSLOption sets the DisableSSL options. Can be used as a functional
59// option when resolving endpoints.
60func DisableSSLOption(o *Options) {
61 o.DisableSSL = true
62}
63
64// UseDualStackOption sets the UseDualStack option. Can be used as a functional
65// option when resolving endpoints.
66func UseDualStackOption(o *Options) {
67 o.UseDualStack = true
68}
69
70// StrictMatchingOption sets the StrictMatching option. Can be used as a functional
71// option when resolving endpoints.
72func StrictMatchingOption(o *Options) {
73 o.StrictMatching = true
74}
75
76// ResolveUnknownServiceOption sets the ResolveUnknownService option. Can be used
77// as a functional option when resolving endpoints.
78func ResolveUnknownServiceOption(o *Options) {
79 o.ResolveUnknownService = true
80}
81
82// A Resolver provides the interface for functionality to resolve endpoints.
 83// The built-in Partition and DefaultResolver return values satisfy this interface.
84type Resolver interface {
85 EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error)
86}
87
88// ResolverFunc is a helper utility that wraps a function so it satisfies the
89// Resolver interface. This is useful when you want to add additional endpoint
90// resolving logic, or stub out specific endpoints with custom values.
91type ResolverFunc func(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error)
92
93// EndpointFor wraps the ResolverFunc function to satisfy the Resolver interface.
94func (fn ResolverFunc) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) {
95 return fn(service, region, opts...)
96}
97
98var schemeRE = regexp.MustCompile("^([^:]+)://")
99
 100// AddScheme adds the HTTP or HTTPS scheme to an endpoint URL if there is no
 101// scheme. If disableSSL is true the scheme will be HTTP instead of the default HTTPS.
102//
103// If disableSSL is set, it will only set the URL's scheme if the URL does not
104// contain a scheme.
105func AddScheme(endpoint string, disableSSL bool) string {
106 if !schemeRE.MatchString(endpoint) {
107 scheme := "https"
108 if disableSSL {
109 scheme = "http"
110 }
111 endpoint = fmt.Sprintf("%s://%s", scheme, endpoint)
112 }
113
114 return endpoint
115}
116
 117// EnumPartitions provides a way to retrieve the underlying partitions that
118// make up the SDK's default Resolver, or any resolver decoded from a model
119// file.
120//
121// Use this interface with DefaultResolver and DecodeModels to get the list of
122// Partitions.
123type EnumPartitions interface {
124 Partitions() []Partition
125}
126
127// RegionsForService returns a map of regions for the partition and service.
128// If either the partition or service does not exist false will be returned
 129// as the second return value.
130//
131// This example shows how to get the regions for DynamoDB in the AWS partition.
132// rs, exists := endpoints.RegionsForService(endpoints.DefaultPartitions(), endpoints.AwsPartitionID, endpoints.DynamodbServiceID)
133//
134// This is equivalent to using the partition directly.
135// rs := endpoints.AwsPartition().Services()[endpoints.DynamodbServiceID].Regions()
136func RegionsForService(ps []Partition, partitionID, serviceID string) (map[string]Region, bool) {
137 for _, p := range ps {
138 if p.ID() != partitionID {
139 continue
140 }
141 if _, ok := p.p.Services[serviceID]; !ok {
142 break
143 }
144
145 s := Service{
146 id: serviceID,
147 p: p.p,
148 }
149 return s.Regions(), true
150 }
151
152 return map[string]Region{}, false
153}
154
155// PartitionForRegion returns the first partition which includes the region
156// passed in. This includes both known regions and regions which match
157// a pattern supported by the partition which may include regions that are
158// not explicitly known by the partition. Use the Regions method of the
159// returned Partition if explicit support is needed.
160func PartitionForRegion(ps []Partition, regionID string) (Partition, bool) {
161 for _, p := range ps {
162 if _, ok := p.p.Regions[regionID]; ok || p.p.RegionRegex.MatchString(regionID) {
163 return p, true
164 }
165 }
166
167 return Partition{}, false
168}
169
170// A Partition provides the ability to enumerate the partition's regions
171// and services.
172type Partition struct {
173 id string
174 p *partition
175}
176
177// ID returns the identifier of the partition.
178func (p Partition) ID() string { return p.id }
179
180// EndpointFor attempts to resolve the endpoint based on service and region.
181// See Options for information on configuring how the endpoint is resolved.
182//
183// If the service cannot be found in the metadata the UnknownServiceError
 184// error will be returned. This validation will occur regardless of whether
185// StrictMatching is enabled. To enable resolving unknown services set the
186// "ResolveUnknownService" option to true. When StrictMatching is disabled
 187// this option allows the partition resolver to resolve an endpoint based on
188// the service endpoint ID provided.
189//
190// When resolving endpoints you can choose to enable StrictMatching. This will
191// require the provided service and region to be known by the partition.
192// If the endpoint cannot be strictly resolved an error will be returned. This
193// mode is useful to ensure the endpoint resolved is valid. Without
 194// StrictMatching enabled the endpoint returned may look valid but may not work.
195// StrictMatching requires the SDK to be updated if you want to take advantage
 196// of new region and service expansions.
197//
 198// Errors that can be returned:
199// * UnknownServiceError
200// * UnknownEndpointError
201func (p Partition) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) {
202 return p.p.EndpointFor(service, region, opts...)
203}
204
205// Regions returns a map of Regions indexed by their ID. This is useful for
206// enumerating over the regions in a partition.
207func (p Partition) Regions() map[string]Region {
208 rs := map[string]Region{}
209 for id := range p.p.Regions {
210 rs[id] = Region{
211 id: id,
212 p: p.p,
213 }
214 }
215
216 return rs
217}
218
219// Services returns a map of Service indexed by their ID. This is useful for
220// enumerating over the services in a partition.
221func (p Partition) Services() map[string]Service {
222 ss := map[string]Service{}
223 for id := range p.p.Services {
224 ss[id] = Service{
225 id: id,
226 p: p.p,
227 }
228 }
229
230 return ss
231}
232
233// A Region provides information about a region, and ability to resolve an
234// endpoint from the context of a region, given a service.
235type Region struct {
236 id, desc string
237 p *partition
238}
239
240// ID returns the region's identifier.
241func (r Region) ID() string { return r.id }
242
243// ResolveEndpoint resolves an endpoint from the context of the region given
244// a service. See Partition.EndpointFor for usage and errors that can be returned.
245func (r Region) ResolveEndpoint(service string, opts ...func(*Options)) (ResolvedEndpoint, error) {
246 return r.p.EndpointFor(service, r.id, opts...)
247}
248
249// Services returns a list of all services that are known to be in this region.
250func (r Region) Services() map[string]Service {
251 ss := map[string]Service{}
252 for id, s := range r.p.Services {
253 if _, ok := s.Endpoints[r.id]; ok {
254 ss[id] = Service{
255 id: id,
256 p: r.p,
257 }
258 }
259 }
260
261 return ss
262}
263
264// A Service provides information about a service, and ability to resolve an
265// endpoint from the context of a service, given a region.
266type Service struct {
267 id string
268 p *partition
269}
270
271// ID returns the identifier for the service.
272func (s Service) ID() string { return s.id }
273
274// ResolveEndpoint resolves an endpoint from the context of a service given
275// a region. See Partition.EndpointFor for usage and errors that can be returned.
276func (s Service) ResolveEndpoint(region string, opts ...func(*Options)) (ResolvedEndpoint, error) {
277 return s.p.EndpointFor(s.id, region, opts...)
278}
279
280// Regions returns a map of Regions that the service is present in.
281//
 282// A region is the AWS region the service exists in, whereas an Endpoint is
 283// a URL that can be resolved to an instance of a service.
284func (s Service) Regions() map[string]Region {
285 rs := map[string]Region{}
286 for id := range s.p.Services[s.id].Endpoints {
287 if _, ok := s.p.Regions[id]; ok {
288 rs[id] = Region{
289 id: id,
290 p: s.p,
291 }
292 }
293 }
294
295 return rs
296}
297
298// Endpoints returns a map of Endpoints indexed by their ID for all known
299// endpoints for a service.
300//
 301// A region is the AWS region the service exists in, whereas an Endpoint is
 302// a URL that can be resolved to an instance of a service.
303func (s Service) Endpoints() map[string]Endpoint {
304 es := map[string]Endpoint{}
305 for id := range s.p.Services[s.id].Endpoints {
306 es[id] = Endpoint{
307 id: id,
308 serviceID: s.id,
309 p: s.p,
310 }
311 }
312
313 return es
314}
315
 316// An Endpoint provides information about endpoints, and provides the ability
317// to resolve that endpoint for the service, and the region the endpoint
318// represents.
319type Endpoint struct {
320 id string
321 serviceID string
322 p *partition
323}
324
325// ID returns the identifier for an endpoint.
326func (e Endpoint) ID() string { return e.id }
327
 328// ServiceID returns the identifier of the service the endpoint belongs to.
329func (e Endpoint) ServiceID() string { return e.serviceID }
330
331// ResolveEndpoint resolves an endpoint from the context of a service and
332// region the endpoint represents. See Partition.EndpointFor for usage and
333// errors that can be returned.
334func (e Endpoint) ResolveEndpoint(opts ...func(*Options)) (ResolvedEndpoint, error) {
335 return e.p.EndpointFor(e.serviceID, e.id, opts...)
336}
337
 338// A ResolvedEndpoint is an endpoint that has been resolved based on a partition,
339// service, and region.
340type ResolvedEndpoint struct {
341 // The endpoint URL
342 URL string
343
344 // The region that should be used for signing requests.
345 SigningRegion string
346
347 // The service name that should be used for signing requests.
348 SigningName string
349
350 // The signing method that should be used for signing requests.
351 SigningMethod string
352}
353
 354// So that the Error interface type can be included as an anonymous field
 355// in the error structs below and not conflict with the error.Error() method.
356type awsError awserr.Error
357
 358// An EndpointNotFoundError is returned when in StrictMatching mode, and the
359// endpoint for the service and region cannot be found in any of the partitions.
360type EndpointNotFoundError struct {
361 awsError
362 Partition string
363 Service string
364 Region string
365}
366
 367// An UnknownServiceError is returned when the service does not resolve to an
368// endpoint. Includes a list of all known services for the partition. Returned
369// when a partition does not support the service.
370type UnknownServiceError struct {
371 awsError
372 Partition string
373 Service string
374 Known []string
375}
376
377// NewUnknownServiceError builds and returns UnknownServiceError.
378func NewUnknownServiceError(p, s string, known []string) UnknownServiceError {
379 return UnknownServiceError{
380 awsError: awserr.New("UnknownServiceError",
381 "could not resolve endpoint for unknown service", nil),
382 Partition: p,
383 Service: s,
384 Known: known,
385 }
386}
387
 388// Error returns the string representation of the error.
389func (e UnknownServiceError) Error() string {
390 extra := fmt.Sprintf("partition: %q, service: %q",
391 e.Partition, e.Service)
392 if len(e.Known) > 0 {
393 extra += fmt.Sprintf(", known: %v", e.Known)
394 }
395 return awserr.SprintError(e.Code(), e.Message(), extra, e.OrigErr())
396}
397
398// String returns the string representation of the error.
399func (e UnknownServiceError) String() string {
400 return e.Error()
401}
402
 403// An UnknownEndpointError is returned when in StrictMatching mode and the
404// service is valid, but the region does not resolve to an endpoint. Includes
405// a list of all known endpoints for the service.
406type UnknownEndpointError struct {
407 awsError
408 Partition string
409 Service string
410 Region string
411 Known []string
412}
413
414// NewUnknownEndpointError builds and returns UnknownEndpointError.
415func NewUnknownEndpointError(p, s, r string, known []string) UnknownEndpointError {
416 return UnknownEndpointError{
417 awsError: awserr.New("UnknownEndpointError",
418 "could not resolve endpoint", nil),
419 Partition: p,
420 Service: s,
421 Region: r,
422 Known: known,
423 }
424}
425
 426// Error returns the string representation of the error.
427func (e UnknownEndpointError) Error() string {
428 extra := fmt.Sprintf("partition: %q, service: %q, region: %q",
429 e.Partition, e.Service, e.Region)
430 if len(e.Known) > 0 {
431 extra += fmt.Sprintf(", known: %v", e.Known)
432 }
433 return awserr.SprintError(e.Code(), e.Message(), extra, e.OrigErr())
434}
435
436// String returns the string representation of the error.
437func (e UnknownEndpointError) String() string {
438 return e.Error()
439}
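
Because NewUnknownServiceError and NewUnknownEndpointError return value types, callers can distinguish the failure modes with a type switch. A small sketch, assuming the AwsPartition accessor from the generated defaults:

	_, err := endpoints.AwsPartition().EndpointFor(
		"not-a-real-service", "us-east-1", // hypothetical service ID
		endpoints.StrictMatchingOption,
	)
	if err != nil {
		switch e := err.(type) {
		case endpoints.UnknownServiceError:
			fmt.Println("unknown service:", e.Service, "known:", e.Known)
		case endpoints.UnknownEndpointError:
			fmt.Println("no endpoint for region:", e.Region)
		}
	}
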
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go
new file mode 100644
index 0000000..13d968a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go
@@ -0,0 +1,303 @@
1package endpoints
2
3import (
4 "fmt"
5 "regexp"
6 "strconv"
7 "strings"
8)
9
10type partitions []partition
11
12func (ps partitions) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) {
13 var opt Options
14 opt.Set(opts...)
15
16 for i := 0; i < len(ps); i++ {
17 if !ps[i].canResolveEndpoint(service, region, opt.StrictMatching) {
18 continue
19 }
20
21 return ps[i].EndpointFor(service, region, opts...)
22 }
23
 24 // If loose matching, fall back to the first partition's format
 25 // when resolving the endpoint.
26 if !opt.StrictMatching && len(ps) > 0 {
27 return ps[0].EndpointFor(service, region, opts...)
28 }
29
30 return ResolvedEndpoint{}, NewUnknownEndpointError("all partitions", service, region, []string{})
31}
32
33// Partitions satisfies the EnumPartitions interface and returns a list
 34// of Partitions, one for each partition represented in the SDK's
35// endpoints model.
36func (ps partitions) Partitions() []Partition {
37 parts := make([]Partition, 0, len(ps))
38 for i := 0; i < len(ps); i++ {
39 parts = append(parts, ps[i].Partition())
40 }
41
42 return parts
43}
44
45type partition struct {
46 ID string `json:"partition"`
47 Name string `json:"partitionName"`
48 DNSSuffix string `json:"dnsSuffix"`
49 RegionRegex regionRegex `json:"regionRegex"`
50 Defaults endpoint `json:"defaults"`
51 Regions regions `json:"regions"`
52 Services services `json:"services"`
53}
54
55func (p partition) Partition() Partition {
56 return Partition{
57 id: p.ID,
58 p: &p,
59 }
60}
61
62func (p partition) canResolveEndpoint(service, region string, strictMatch bool) bool {
63 s, hasService := p.Services[service]
64 _, hasEndpoint := s.Endpoints[region]
65
66 if hasEndpoint && hasService {
67 return true
68 }
69
70 if strictMatch {
71 return false
72 }
73
74 return p.RegionRegex.MatchString(region)
75}
76
77func (p partition) EndpointFor(service, region string, opts ...func(*Options)) (resolved ResolvedEndpoint, err error) {
78 var opt Options
79 opt.Set(opts...)
80
81 s, hasService := p.Services[service]
82 if !(hasService || opt.ResolveUnknownService) {
83 // Only return error if the resolver will not fallback to creating
84 // endpoint based on service endpoint ID passed in.
85 return resolved, NewUnknownServiceError(p.ID, service, serviceList(p.Services))
86 }
87
88 e, hasEndpoint := s.endpointForRegion(region)
89 if !hasEndpoint && opt.StrictMatching {
90 return resolved, NewUnknownEndpointError(p.ID, service, region, endpointList(s.Endpoints))
91 }
92
93 defs := []endpoint{p.Defaults, s.Defaults}
94 return e.resolve(service, region, p.DNSSuffix, defs, opt), nil
95}
96
97func serviceList(ss services) []string {
98 list := make([]string, 0, len(ss))
99 for k := range ss {
100 list = append(list, k)
101 }
102 return list
103}
104func endpointList(es endpoints) []string {
105 list := make([]string, 0, len(es))
106 for k := range es {
107 list = append(list, k)
108 }
109 return list
110}
111
112type regionRegex struct {
113 *regexp.Regexp
114}
115
116func (rr *regionRegex) UnmarshalJSON(b []byte) (err error) {
117 // Strip leading and trailing quotes
118 regex, err := strconv.Unquote(string(b))
119 if err != nil {
120 return fmt.Errorf("unable to strip quotes from regex, %v", err)
121 }
122
123 rr.Regexp, err = regexp.Compile(regex)
124 if err != nil {
125 return fmt.Errorf("unable to unmarshal region regex, %v", err)
126 }
127 return nil
128}
129
130type regions map[string]region
131
132type region struct {
133 Description string `json:"description"`
134}
135
136type services map[string]service
137
138type service struct {
139 PartitionEndpoint string `json:"partitionEndpoint"`
140 IsRegionalized boxedBool `json:"isRegionalized,omitempty"`
141 Defaults endpoint `json:"defaults"`
142 Endpoints endpoints `json:"endpoints"`
143}
144
145func (s *service) endpointForRegion(region string) (endpoint, bool) {
146 if s.IsRegionalized == boxedFalse {
147 return s.Endpoints[s.PartitionEndpoint], region == s.PartitionEndpoint
148 }
149
150 if e, ok := s.Endpoints[region]; ok {
151 return e, true
152 }
153
 154 // Unable to find any matching endpoint, return a
 155 // blank endpoint that will be used for generic endpoint creation.
156 return endpoint{}, false
157}
158
159type endpoints map[string]endpoint
160
161type endpoint struct {
162 Hostname string `json:"hostname"`
163 Protocols []string `json:"protocols"`
164 CredentialScope credentialScope `json:"credentialScope"`
165
166 // Custom fields not modeled
167 HasDualStack boxedBool `json:"-"`
168 DualStackHostname string `json:"-"`
169
170 // Signature Version not used
171 SignatureVersions []string `json:"signatureVersions"`
172
173 // SSLCommonName not used.
174 SSLCommonName string `json:"sslCommonName"`
175}
176
177const (
178 defaultProtocol = "https"
179 defaultSigner = "v4"
180)
181
182var (
183 protocolPriority = []string{"https", "http"}
184 signerPriority = []string{"v4", "v2"}
185)
186
187func getByPriority(s []string, p []string, def string) string {
188 if len(s) == 0 {
189 return def
190 }
191
192 for i := 0; i < len(p); i++ {
193 for j := 0; j < len(s); j++ {
194 if s[j] == p[i] {
195 return s[j]
196 }
197 }
198 }
199
200 return s[0]
201}
202
203func (e endpoint) resolve(service, region, dnsSuffix string, defs []endpoint, opts Options) ResolvedEndpoint {
204 var merged endpoint
205 for _, def := range defs {
206 merged.mergeIn(def)
207 }
208 merged.mergeIn(e)
209 e = merged
210
211 hostname := e.Hostname
212
213 // Offset the hostname for dualstack if enabled
214 if opts.UseDualStack && e.HasDualStack == boxedTrue {
215 hostname = e.DualStackHostname
216 }
217
218 u := strings.Replace(hostname, "{service}", service, 1)
219 u = strings.Replace(u, "{region}", region, 1)
220 u = strings.Replace(u, "{dnsSuffix}", dnsSuffix, 1)
221
222 scheme := getEndpointScheme(e.Protocols, opts.DisableSSL)
223 u = fmt.Sprintf("%s://%s", scheme, u)
224
225 signingRegion := e.CredentialScope.Region
226 if len(signingRegion) == 0 {
227 signingRegion = region
228 }
229 signingName := e.CredentialScope.Service
230 if len(signingName) == 0 {
231 signingName = service
232 }
233
234 return ResolvedEndpoint{
235 URL: u,
236 SigningRegion: signingRegion,
237 SigningName: signingName,
238 SigningMethod: getByPriority(e.SignatureVersions, signerPriority, defaultSigner),
239 }
240}
241
242func getEndpointScheme(protocols []string, disableSSL bool) string {
243 if disableSSL {
244 return "http"
245 }
246
247 return getByPriority(protocols, protocolPriority, defaultProtocol)
248}
249
250func (e *endpoint) mergeIn(other endpoint) {
251 if len(other.Hostname) > 0 {
252 e.Hostname = other.Hostname
253 }
254 if len(other.Protocols) > 0 {
255 e.Protocols = other.Protocols
256 }
257 if len(other.SignatureVersions) > 0 {
258 e.SignatureVersions = other.SignatureVersions
259 }
260 if len(other.CredentialScope.Region) > 0 {
261 e.CredentialScope.Region = other.CredentialScope.Region
262 }
263 if len(other.CredentialScope.Service) > 0 {
264 e.CredentialScope.Service = other.CredentialScope.Service
265 }
266 if len(other.SSLCommonName) > 0 {
267 e.SSLCommonName = other.SSLCommonName
268 }
269 if other.HasDualStack != boxedBoolUnset {
270 e.HasDualStack = other.HasDualStack
271 }
272 if len(other.DualStackHostname) > 0 {
273 e.DualStackHostname = other.DualStackHostname
274 }
275}
276
277type credentialScope struct {
278 Region string `json:"region"`
279 Service string `json:"service"`
280}
281
282type boxedBool int
283
284func (b *boxedBool) UnmarshalJSON(buf []byte) error {
285 v, err := strconv.ParseBool(string(buf))
286 if err != nil {
287 return err
288 }
289
290 if v {
291 *b = boxedTrue
292 } else {
293 *b = boxedFalse
294 }
295
296 return nil
297}
298
299const (
300 boxedBoolUnset boxedBool = iota
301 boxedFalse
302 boxedTrue
303)
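
The boxedBool tri-state exists so merging logic can tell an explicit false apart from a field that was absent from the JSON model. A minimal standalone sketch of the same pattern (the flag and struct names here are illustrative, not part of the SDK):

    package main

    import (
        "encoding/json"
        "fmt"
        "strconv"
    )

    // flag mirrors boxedBool above: the zero value means "unset", so an
    // explicit false in the JSON is distinguishable from an absent field.
    type flag int

    const (
        flagUnset flag = iota
        flagFalse
        flagTrue
    )

    func (f *flag) UnmarshalJSON(buf []byte) error {
        v, err := strconv.ParseBool(string(buf))
        if err != nil {
            return err
        }
        if v {
            *f = flagTrue
        } else {
            *f = flagFalse
        }
        return nil
    }

    type svc struct {
        IsRegionalized flag `json:"isRegionalized"`
    }

    func main() {
        var explicit, absent svc
        // Unmarshal errors elided for brevity.
        json.Unmarshal([]byte(`{"isRegionalized": false}`), &explicit)
        json.Unmarshal([]byte(`{}`), &absent)
        fmt.Println(explicit.IsRegionalized == flagFalse) // true: explicit false
        fmt.Println(absent.IsRegionalized == flagUnset)   // true: field absent
    }
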
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go
new file mode 100644
index 0000000..05e92df
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go
@@ -0,0 +1,337 @@
1// +build codegen
2
3package endpoints
4
5import (
6 "fmt"
7 "io"
8 "reflect"
9 "strings"
10 "text/template"
11 "unicode"
12)
13
14// CodeGenOptions are the options for generating Go code for the endpoints
15// from the endpoints model definition.
16type CodeGenOptions struct {
17 // Options for how the model will be decoded.
18 DecodeModelOptions DecodeModelOptions
19}
20
21// Set combines all of the option functions together
22func (d *CodeGenOptions) Set(optFns ...func(*CodeGenOptions)) {
23 for _, fn := range optFns {
24 fn(d)
25 }
26}
27
28// CodeGenModel, given an endpoints model file, will decode it and attempt to
29// generate Go code from the model definition. An error will be returned if
30// the model cannot be decoded or the code cannot be generated.
31func CodeGenModel(modelFile io.Reader, outFile io.Writer, optFns ...func(*CodeGenOptions)) error {
32 var opts CodeGenOptions
33 opts.Set(optFns...)
34
35 resolver, err := DecodeModel(modelFile, func(d *DecodeModelOptions) {
36 *d = opts.DecodeModelOptions
37 })
38 if err != nil {
39 return err
40 }
41
42 tmpl := template.Must(template.New("tmpl").Funcs(funcMap).Parse(v3Tmpl))
43 if err := tmpl.ExecuteTemplate(outFile, "defaults", resolver); err != nil {
44 return fmt.Errorf("failed to execute template, %v", err)
45 }
46
47 return nil
48}
49
50func toSymbol(v string) string {
51 out := []rune{}
52 for _, c := range strings.Title(v) {
53 if !(unicode.IsNumber(c) || unicode.IsLetter(c)) {
54 continue
55 }
56
57 out = append(out, c)
58 }
59
60 return string(out)
61}
62
63func quoteString(v string) string {
64 return fmt.Sprintf("%q", v)
65}
66
67func regionConstName(p, r string) string {
68 return toSymbol(p) + toSymbol(r)
69}
70
71func partitionGetter(id string) string {
72 return fmt.Sprintf("%sPartition", toSymbol(id))
73}
74
75func partitionVarName(id string) string {
76 return fmt.Sprintf("%sPartition", strings.ToLower(toSymbol(id)))
77}
78
79func listPartitionNames(ps partitions) string {
80 names := []string{}
81 switch len(ps) {
82 case 1:
83 return ps[0].Name
84 case 2:
85 return fmt.Sprintf("%s and %s", ps[0].Name, ps[1].Name)
86 default:
87 for i, p := range ps {
88 if i == len(ps)-1 {
89 names = append(names, "and "+p.Name)
90 } else {
91 names = append(names, p.Name)
92 }
93 }
94 return strings.Join(names, ", ")
95 }
96}
97
98func boxedBoolIfSet(msg string, v boxedBool) string {
99 switch v {
100 case boxedTrue:
101 return fmt.Sprintf(msg, "boxedTrue")
102 case boxedFalse:
103 return fmt.Sprintf(msg, "boxedFalse")
104 default:
105 return ""
106 }
107}
108
109func stringIfSet(msg, v string) string {
110 if len(v) == 0 {
111 return ""
112 }
113
114 return fmt.Sprintf(msg, v)
115}
116
117func stringSliceIfSet(msg string, vs []string) string {
118 if len(vs) == 0 {
119 return ""
120 }
121
122 names := []string{}
123 for _, v := range vs {
124 names = append(names, `"`+v+`"`)
125 }
126
127 return fmt.Sprintf(msg, strings.Join(names, ","))
128}
129
130func endpointIsSet(v endpoint) bool {
131 return !reflect.DeepEqual(v, endpoint{})
132}
133
134func serviceSet(ps partitions) map[string]struct{} {
135 set := map[string]struct{}{}
136 for _, p := range ps {
137 for id := range p.Services {
138 set[id] = struct{}{}
139 }
140 }
141
142 return set
143}
144
145var funcMap = template.FuncMap{
146 "ToSymbol": toSymbol,
147 "QuoteString": quoteString,
148 "RegionConst": regionConstName,
149 "PartitionGetter": partitionGetter,
150 "PartitionVarName": partitionVarName,
151 "ListPartitionNames": listPartitionNames,
152 "BoxedBoolIfSet": boxedBoolIfSet,
153 "StringIfSet": stringIfSet,
154 "StringSliceIfSet": stringSliceIfSet,
155 "EndpointIsSet": endpointIsSet,
156 "ServicesSet": serviceSet,
157}
158
159const v3Tmpl = `
160{{ define "defaults" -}}
161// Code generated by aws/endpoints/v3model_codegen.go. DO NOT EDIT.
162
163package endpoints
164
165import (
166 "regexp"
167)
168
169 {{ template "partition consts" . }}
170
171 {{ range $_, $partition := . }}
172 {{ template "partition region consts" $partition }}
173 {{ end }}
174
175 {{ template "service consts" . }}
176
177 {{ template "endpoint resolvers" . }}
178{{- end }}
179
180{{ define "partition consts" }}
181 // Partition identifiers
182 const (
183 {{ range $_, $p := . -}}
184 {{ ToSymbol $p.ID }}PartitionID = {{ QuoteString $p.ID }} // {{ $p.Name }} partition.
185 {{ end -}}
186 )
187{{- end }}
188
189{{ define "partition region consts" }}
190 // {{ .Name }} partition's regions.
191 const (
192 {{ range $id, $region := .Regions -}}
193 {{ ToSymbol $id }}RegionID = {{ QuoteString $id }} // {{ $region.Description }}.
194 {{ end -}}
195 )
196{{- end }}
197
198{{ define "service consts" }}
199 // Service identifiers
200 const (
201 {{ $serviceSet := ServicesSet . -}}
202 {{ range $id, $_ := $serviceSet -}}
203 {{ ToSymbol $id }}ServiceID = {{ QuoteString $id }} // {{ ToSymbol $id }}.
204 {{ end -}}
205 )
206{{- end }}
207
208{{ define "endpoint resolvers" }}
209 // DefaultResolver returns an Endpoint resolver that will be able
210 // to resolve endpoints for: {{ ListPartitionNames . }}.
211 //
212 // Use DefaultPartitions() to get the list of the default partitions.
213 func DefaultResolver() Resolver {
214 return defaultPartitions
215 }
216
217 // DefaultPartitions returns a list of the partitions the SDK is bundled
218 // with. The available partitions are: {{ ListPartitionNames . }}.
219 //
220 // partitions := endpoints.DefaultPartitions
221 // for _, p := range partitions {
222 // // ... inspect partitions
223 // }
224 func DefaultPartitions() []Partition {
225 return defaultPartitions.Partitions()
226 }
227
228 var defaultPartitions = partitions{
229 {{ range $_, $partition := . -}}
230 {{ PartitionVarName $partition.ID }},
231 {{ end }}
232 }
233
234 {{ range $_, $partition := . -}}
235 {{ $name := PartitionGetter $partition.ID -}}
236 // {{ $name }} returns the Resolver for {{ $partition.Name }}.
237 func {{ $name }}() Partition {
238 return {{ PartitionVarName $partition.ID }}.Partition()
239 }
240 var {{ PartitionVarName $partition.ID }} = {{ template "gocode Partition" $partition }}
241 {{ end }}
242{{ end }}
243
244{{ define "default partitions" }}
245 func DefaultPartitions() []Partition {
246 return []partition{
247 {{ range $_, $partition := . -}}
248 // {{ ToSymbol $partition.ID}}Partition(),
249 {{ end }}
250 }
251 }
252{{ end }}
253
254{{ define "gocode Partition" -}}
255partition{
256 {{ StringIfSet "ID: %q,\n" .ID -}}
257 {{ StringIfSet "Name: %q,\n" .Name -}}
258 {{ StringIfSet "DNSSuffix: %q,\n" .DNSSuffix -}}
259 RegionRegex: {{ template "gocode RegionRegex" .RegionRegex }},
260 {{ if EndpointIsSet .Defaults -}}
261 Defaults: {{ template "gocode Endpoint" .Defaults }},
262 {{- end }}
263 Regions: {{ template "gocode Regions" .Regions }},
264 Services: {{ template "gocode Services" .Services }},
265}
266{{- end }}
267
268{{ define "gocode RegionRegex" -}}
269regionRegex{
270 Regexp: func() *regexp.Regexp{
271 reg, _ := regexp.Compile({{ QuoteString .Regexp.String }})
272 return reg
273 }(),
274}
275{{- end }}
276
277{{ define "gocode Regions" -}}
278regions{
279 {{ range $id, $region := . -}}
280 "{{ $id }}": {{ template "gocode Region" $region }},
281 {{ end -}}
282}
283{{- end }}
284
285{{ define "gocode Region" -}}
286region{
287 {{ StringIfSet "Description: %q,\n" .Description -}}
288}
289{{- end }}
290
291{{ define "gocode Services" -}}
292services{
293 {{ range $id, $service := . -}}
294 "{{ $id }}": {{ template "gocode Service" $service }},
295 {{ end }}
296}
297{{- end }}
298
299{{ define "gocode Service" -}}
300service{
301 {{ StringIfSet "PartitionEndpoint: %q,\n" .PartitionEndpoint -}}
302 {{ BoxedBoolIfSet "IsRegionalized: %s,\n" .IsRegionalized -}}
303 {{ if EndpointIsSet .Defaults -}}
304 Defaults: {{ template "gocode Endpoint" .Defaults -}},
305 {{- end }}
306 {{ if .Endpoints -}}
307 Endpoints: {{ template "gocode Endpoints" .Endpoints }},
308 {{- end }}
309}
310{{- end }}
311
312{{ define "gocode Endpoints" -}}
313endpoints{
314 {{ range $id, $endpoint := . -}}
315 "{{ $id }}": {{ template "gocode Endpoint" $endpoint }},
316 {{ end }}
317}
318{{- end }}
319
320{{ define "gocode Endpoint" -}}
321endpoint{
322 {{ StringIfSet "Hostname: %q,\n" .Hostname -}}
323 {{ StringIfSet "SSLCommonName: %q,\n" .SSLCommonName -}}
324 {{ StringSliceIfSet "Protocols: []string{%s},\n" .Protocols -}}
325 {{ StringSliceIfSet "SignatureVersions: []string{%s},\n" .SignatureVersions -}}
326 {{ if or .CredentialScope.Region .CredentialScope.Service -}}
327 CredentialScope: credentialScope{
328 {{ StringIfSet "Region: %q,\n" .CredentialScope.Region -}}
329 {{ StringIfSet "Service: %q,\n" .CredentialScope.Service -}}
330 },
331 {{- end }}
332 {{ BoxedBoolIfSet "HasDualStack: %s,\n" .HasDualStack -}}
333 {{ StringIfSet "DualStackHostname: %q,\n" .DualStackHostname -}}
334
335}
336{{- end }}
337`
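
CodeGenModel above is only compiled when the codegen build tag is set. A sketch of a driver program, assuming that tag (the file names are hypothetical; build with "go build -tags codegen"):

    package main

    import (
        "os"

        "github.com/aws/aws-sdk-go/aws/endpoints"
    )

    func main() {
        // endpoints.json is a hypothetical local copy of the AWS endpoints
        // model; defaults.go is the generated output.
        in, err := os.Open("endpoints.json")
        if err != nil {
            panic(err)
        }
        defer in.Close()

        out, err := os.Create("defaults.go")
        if err != nil {
            panic(err)
        }
        defer out.Close()

        if err := endpoints.CodeGenModel(in, out); err != nil {
            panic(err)
        }
    }
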
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/errors.go b/vendor/github.com/aws/aws-sdk-go/aws/errors.go
new file mode 100644
index 0000000..5766361
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/errors.go
@@ -0,0 +1,17 @@
1package aws
2
3import "github.com/aws/aws-sdk-go/aws/awserr"
4
5var (
6 // ErrMissingRegion is an error that is returned if region configuration is
7 // not found.
8 //
9 // @readonly
10 ErrMissingRegion = awserr.New("MissingRegion", "could not find region configuration", nil)
11
12 // ErrMissingEndpoint is an error that is returned if an endpoint cannot be
13 // resolved for a service.
14 //
15 // @readonly
16 ErrMissingEndpoint = awserr.New("MissingEndpoint", "'Endpoint' configuration is required for this service", nil)
17)
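
A sketch of how a caller might detect this condition, assuming no region is configured in the environment or shared config; the request fails during validation, before anything is sent:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws/awserr"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/s3"
    )

    func main() {
        // Deliberately no region: the core handlers will fail the request
        // with ErrMissingRegion before it is ever sent.
        sess := session.Must(session.NewSession())
        svc := s3.New(sess)

        _, err := svc.ListBuckets(&s3.ListBucketsInput{})
        if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "MissingRegion" {
            fmt.Println("configure a region, e.g. AWS_REGION=us-east-1:", aerr.Message())
        }
    }
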
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go b/vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go
new file mode 100644
index 0000000..91a6f27
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go
@@ -0,0 +1,12 @@
1package aws
2
3// JSONValue is a representation of a grab bag type that will be marshaled
4// into a JSON string. This type can be used just like any other map.
5//
6// Example:
7//
8// values := aws.JSONValue{
9// "Foo": "Bar",
10// }
11// values["Baz"] = "Qux"
12type JSONValue map[string]interface{}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/logger.go b/vendor/github.com/aws/aws-sdk-go/aws/logger.go
new file mode 100644
index 0000000..db87188
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/logger.go
@@ -0,0 +1,112 @@
1package aws
2
3import (
4 "log"
5 "os"
6)
7
8// A LogLevelType defines the level logging should be performed at. Used to instruct
9// the SDK which statements should be logged.
10type LogLevelType uint
11
12// LogLevel returns a pointer to a LogLevelType. Should be used to work around
13// not being able to take the address of a non-composite literal.
14func LogLevel(l LogLevelType) *LogLevelType {
15 return &l
16}
17
18// Value returns the LogLevel value or the default value LogOff if the LogLevel
19// is nil. Safe to use on nil value LogLevelTypes.
20func (l *LogLevelType) Value() LogLevelType {
21 if l != nil {
22 return *l
23 }
24 return LogOff
25}
26
27// Matches returns true if the v LogLevel is enabled by this LogLevel. Should be
28// used with logging sub levels. Is safe to use on nil value LogLevelTypes. If
29// LogLevel is nil, it will default to the LogOff comparison.
30func (l *LogLevelType) Matches(v LogLevelType) bool {
31 c := l.Value()
32 return c&v == v
33}
34
35// AtLeast returns true if this LogLevel is at least high enough to satisfy v.
36// Is safe to use on nil value LogLevelTypes. If LogLevel is nil, will default
37// to LogOff comparison.
38func (l *LogLevelType) AtLeast(v LogLevelType) bool {
39 c := l.Value()
40 return c >= v
41}
42
43const (
44 // LogOff states that no logging should be performed by the SDK. This is the
45 // default state of the SDK, and should be used to disable all logging.
46 LogOff LogLevelType = iota * 0x1000
47
48 // LogDebug states that debug output should be logged by the SDK. This should
49 // be used to inspect requests made and responses received.
50 LogDebug
51)
52
53// Debug Logging Sub Levels
54const (
55 // LogDebugWithSigning states that the SDK should log request signing and
56 // presigning events. This should be used to log the signing details of
57 // requests for debugging. Will also enable LogDebug.
58 LogDebugWithSigning LogLevelType = LogDebug | (1 << iota)
59
60 // LogDebugWithHTTPBody states the SDK should log HTTP request and response
61 // HTTP bodies in addition to the headers and path. This should be used to
62 // see the body content of requests and responses made while using the SDK.
63 // Will also enable LogDebug.
64 LogDebugWithHTTPBody
65
66 // LogDebugWithRequestRetries states the SDK should log when service requests will
67 // be retried. This should be used when you want to log when service
68 // requests are being retried. Will also enable LogDebug.
69 LogDebugWithRequestRetries
70
71 // LogDebugWithRequestErrors states the SDK should log when service requests fail
72 // to build, send, validate, or unmarshal.
73 LogDebugWithRequestErrors
74)
75
76// A Logger is a minimalistic interface for the SDK to log messages to. Should
77// be used to provide custom logging writers for the SDK to use.
78type Logger interface {
79 Log(...interface{})
80}
81
82// A LoggerFunc is a convenience type to convert a function taking a variadic
83// list of arguments and wrap it so the Logger interface can be used.
84//
85// Example:
86// s3.New(sess, &aws.Config{Logger: aws.LoggerFunc(func(args ...interface{}) {
87// fmt.Fprintln(os.Stdout, args...)
88// })})
89type LoggerFunc func(...interface{})
90
91// Log calls the wrapped function with the arguments provided
92func (f LoggerFunc) Log(args ...interface{}) {
93 f(args...)
94}
95
96// NewDefaultLogger returns a Logger which will write log messages to stdout, and
97// use the same formatting flags as the stdlib log.Logger.
98func NewDefaultLogger() Logger {
99 return &defaultLogger{
100 logger: log.New(os.Stdout, "", log.LstdFlags),
101 }
102}
103
104// A defaultLogger provides a minimalistic logger satisfying the Logger interface.
105type defaultLogger struct {
106 logger *log.Logger
107}
108
109// Log logs the parameters to the stdlib logger. See log.Println.
110func (l defaultLogger) Log(args ...interface{}) {
111 l.logger.Println(args...)
112}
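
The debug sub levels are bit flags that embed LogDebug, which is what Matches relies on. A small sketch:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
    )

    func main() {
        l := aws.LogLevel(aws.LogDebugWithHTTPBody)
        fmt.Println(l.Matches(aws.LogDebug))            // true: sub level implies LogDebug
        fmt.Println(l.Matches(aws.LogDebugWithSigning)) // false: different sub level
        fmt.Println(l.AtLeast(aws.LogDebug))            // true
    }
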
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go b/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go
new file mode 100644
index 0000000..10fc8cb
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go
@@ -0,0 +1,19 @@
1// +build !appengine
2
3package request
4
5import (
6 "net"
7 "os"
8 "syscall"
9)
10
11func isErrConnectionReset(err error) bool {
12 if opErr, ok := err.(*net.OpError); ok {
13 if sysErr, ok := opErr.Err.(*os.SyscallError); ok {
14 return sysErr.Err == syscall.ECONNRESET
15 }
16 }
17
18 return false
19}
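
For illustration, a standalone mirror of isErrConnectionReset showing the error shape it matches; isReset here is a copy for the example, not an SDK function:

    package main

    import (
        "fmt"
        "net"
        "os"
        "syscall"
    )

    // isReset mirrors isErrConnectionReset above: it unwraps a *net.OpError
    // wrapping an *os.SyscallError carrying ECONNRESET.
    func isReset(err error) bool {
        if opErr, ok := err.(*net.OpError); ok {
            if sysErr, ok := opErr.Err.(*os.SyscallError); ok {
                return sysErr.Err == syscall.ECONNRESET
            }
        }
        return false
    }

    func main() {
        err := &net.OpError{Op: "read", Net: "tcp",
            Err: os.NewSyscallError("read", syscall.ECONNRESET)}
        fmt.Println(isReset(err))                 // true
        fmt.Println(isReset(fmt.Errorf("other"))) // false
    }
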
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error_appengine.go b/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error_appengine.go
new file mode 100644
index 0000000..996196e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error_appengine.go
@@ -0,0 +1,11 @@
1// +build appengine
2
3package request
4
5import (
6 "strings"
7)
8
9func isErrConnectionReset(err error) bool {
10 return strings.Contains(err.Error(), "connection reset")
11}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go
new file mode 100644
index 0000000..6c14336
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go
@@ -0,0 +1,225 @@
1package request
2
3import (
4 "fmt"
5 "strings"
6)
7
8// A Handlers provides a collection of request handlers for various
9// stages of handling requests.
10type Handlers struct {
11 Validate HandlerList
12 Build HandlerList
13 Sign HandlerList
14 Send HandlerList
15 ValidateResponse HandlerList
16 Unmarshal HandlerList
17 UnmarshalMeta HandlerList
18 UnmarshalError HandlerList
19 Retry HandlerList
20 AfterRetry HandlerList
21 Complete HandlerList
22}
23
24// Copy returns a copy of this handler's lists.
25func (h *Handlers) Copy() Handlers {
26 return Handlers{
27 Validate: h.Validate.copy(),
28 Build: h.Build.copy(),
29 Sign: h.Sign.copy(),
30 Send: h.Send.copy(),
31 ValidateResponse: h.ValidateResponse.copy(),
32 Unmarshal: h.Unmarshal.copy(),
33 UnmarshalError: h.UnmarshalError.copy(),
34 UnmarshalMeta: h.UnmarshalMeta.copy(),
35 Retry: h.Retry.copy(),
36 AfterRetry: h.AfterRetry.copy(),
37 Complete: h.Complete.copy(),
38 }
39}
40
41// Clear removes callback functions for all handlers
42func (h *Handlers) Clear() {
43 h.Validate.Clear()
44 h.Build.Clear()
45 h.Send.Clear()
46 h.Sign.Clear()
47 h.Unmarshal.Clear()
48 h.UnmarshalMeta.Clear()
49 h.UnmarshalError.Clear()
50 h.ValidateResponse.Clear()
51 h.Retry.Clear()
52 h.AfterRetry.Clear()
53 h.Complete.Clear()
54}
55
56// A HandlerListRunItem represents an entry in the HandlerList which
57// is being run.
58type HandlerListRunItem struct {
59 Index int
60 Handler NamedHandler
61 Request *Request
62}
63
64// A HandlerList manages zero or more handlers in a list.
65type HandlerList struct {
66 list []NamedHandler
67
68 // Called after each request handler in the list is called. If set
69 // and the func returns true the HandlerList will continue to iterate
70 // over the request handlers. If false is returned the HandlerList
71 // will stop iterating.
72 //
73 // Should be used if extra logic needs to be performed between each handler
74 // in the list. This can be used to terminate a list's iteration
75 // based on a condition, such as an error (see HandlerListStopOnError),
76 // or for logging (see HandlerListLogItem).
77 AfterEachFn func(item HandlerListRunItem) bool
78}
79
80// A NamedHandler is a struct that contains a name and function callback.
81type NamedHandler struct {
82 Name string
83 Fn func(*Request)
84}
85
86// copy creates a copy of the handler list.
87func (l *HandlerList) copy() HandlerList {
88 n := HandlerList{
89 AfterEachFn: l.AfterEachFn,
90 }
91 if len(l.list) == 0 {
92 return n
93 }
94
95 n.list = append(make([]NamedHandler, 0, len(l.list)), l.list...)
96 return n
97}
98
99// Clear clears the handler list.
100func (l *HandlerList) Clear() {
101 l.list = l.list[0:0]
102}
103
104// Len returns the number of handlers in the list.
105func (l *HandlerList) Len() int {
106 return len(l.list)
107}
108
109// PushBack pushes handler f to the back of the handler list.
110func (l *HandlerList) PushBack(f func(*Request)) {
111 l.PushBackNamed(NamedHandler{"__anonymous", f})
112}
113
114// PushBackNamed pushes named handler f to the back of the handler list.
115func (l *HandlerList) PushBackNamed(n NamedHandler) {
116 if cap(l.list) == 0 {
117 l.list = make([]NamedHandler, 0, 5)
118 }
119 l.list = append(l.list, n)
120}
121
122// PushFront pushes handler f to the front of the handler list.
123func (l *HandlerList) PushFront(f func(*Request)) {
124 l.PushFrontNamed(NamedHandler{"__anonymous", f})
125}
126
127// PushFrontNamed pushes named handler f to the front of the handler list.
128func (l *HandlerList) PushFrontNamed(n NamedHandler) {
129 if cap(l.list) == len(l.list) {
130 // Allocating new list required
131 l.list = append([]NamedHandler{n}, l.list...)
132 } else {
133 // Enough room to prepend into list.
134 l.list = append(l.list, NamedHandler{})
135 copy(l.list[1:], l.list)
136 l.list[0] = n
137 }
138}
139
140// Remove removes a NamedHandler n
141func (l *HandlerList) Remove(n NamedHandler) {
142 l.RemoveByName(n.Name)
143}
144
145// RemoveByName removes a NamedHandler by name.
146func (l *HandlerList) RemoveByName(name string) {
147 for i := 0; i < len(l.list); i++ {
148 m := l.list[i]
149 if m.Name == name {
150 // Shift the elements down to avoid allocating a new slice
151 copy(l.list[i:], l.list[i+1:])
152 l.list[len(l.list)-1] = NamedHandler{}
153 l.list = l.list[:len(l.list)-1]
154
155 // decrement the index so the next length check is correct
156 i--
157 }
158 }
159}
160
161// Run executes all handlers in the list with a given request object.
162func (l *HandlerList) Run(r *Request) {
163 for i, h := range l.list {
164 h.Fn(r)
165 item := HandlerListRunItem{
166 Index: i, Handler: h, Request: r,
167 }
168 if l.AfterEachFn != nil && !l.AfterEachFn(item) {
169 return
170 }
171 }
172}
173
174// HandlerListLogItem logs the request handler and the state of the
175// request's Error value. Always returns true to continue iterating
176// request handlers in a HandlerList.
177func HandlerListLogItem(item HandlerListRunItem) bool {
178 if item.Request.Config.Logger == nil {
179 return true
180 }
181 item.Request.Config.Logger.Log("DEBUG: RequestHandler",
182 item.Index, item.Handler.Name, item.Request.Error)
183
184 return true
185}
186
187// HandlerListStopOnError returns false to stop the HandlerList iterating
188// over request handlers if Request.Error is not nil. Otherwise it returns
189// true to continue iterating.
190func HandlerListStopOnError(item HandlerListRunItem) bool {
191 return item.Request.Error == nil
192}
193
194// WithAppendUserAgent will add a string to the user agent prefixed with a
195// single white space.
196func WithAppendUserAgent(s string) Option {
197 return func(r *Request) {
198 r.Handlers.Build.PushBack(func(r2 *Request) {
199 AddToUserAgent(r, s)
200 })
201 }
202}
203
204// MakeAddToUserAgentHandler will add the name/version pair to the User-Agent request
205// header. If the extra parameters are provided they will be added as metadata to the
206// name/version pair resulting in the following format.
207// "name/version (extra0; extra1; ...)"
208// The user agent part will be concatenated with this current request's user agent string.
209func MakeAddToUserAgentHandler(name, version string, extra ...string) func(*Request) {
210 ua := fmt.Sprintf("%s/%s", name, version)
211 if len(extra) > 0 {
212 ua += fmt.Sprintf(" (%s)", strings.Join(extra, "; "))
213 }
214 return func(r *Request) {
215 AddToUserAgent(r, ua)
216 }
217}
218
219// MakeAddToUserAgentFreeFormHandler adds the input to the User-Agent request header.
220// The input string will be concatenated with the current request's user agent string.
221func MakeAddToUserAgentFreeFormHandler(s string) func(*Request) {
222 return func(r *Request) {
223 AddToUserAgent(r, s)
224 }
225}
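
A usage sketch of attaching and detaching a named handler through a service client's Handlers lists; the handler name, region, and use of S3 here are illustrative:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/request"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/s3"
    )

    func main() {
        sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
        svc := s3.New(sess)

        // Named handlers can be removed later; anonymous ones cannot.
        svc.Handlers.Send.PushFrontNamed(request.NamedHandler{
            Name: "example.LogOperation", // illustrative name
            Fn: func(r *request.Request) {
                fmt.Println("sending", r.ClientInfo.ServiceName, r.Operation.Name)
            },
        })

        svc.ListBuckets(&s3.ListBucketsInput{}) // handler fires before the wire call

        svc.Handlers.Send.RemoveByName("example.LogOperation")
    }
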
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go b/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go
new file mode 100644
index 0000000..79f7960
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go
@@ -0,0 +1,24 @@
1package request
2
3import (
4 "io"
5 "net/http"
6 "net/url"
7)
8
9func copyHTTPRequest(r *http.Request, body io.ReadCloser) *http.Request {
10 req := new(http.Request)
11 *req = *r
12 req.URL = &url.URL{}
13 *req.URL = *r.URL
14 req.Body = body
15
16 req.Header = http.Header{}
17 for k, v := range r.Header {
18 for _, vv := range v {
19 req.Header.Add(k, vv)
20 }
21 }
22
23 return req
24}
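
copyHTTPRequest deep-copies the URL and headers because assigning an http.Header only copies the map reference. A standalone demonstration of the aliasing it avoids:

    package main

    import (
        "fmt"
        "net/http"
    )

    func main() {
        orig := http.Header{"X-Demo": []string{"a"}}

        alias := orig // shallow: both names share the same map
        alias.Add("X-Demo", "b")
        fmt.Println(orig["X-Demo"]) // [a b] -- the original was mutated

        deep := http.Header{} // deep copy, as copyHTTPRequest does
        for k, vs := range orig {
            for _, v := range vs {
                deep.Add(k, v)
            }
        }
        deep.Add("X-Demo", "c")
        fmt.Println(orig["X-Demo"]) // [a b] -- unchanged
    }
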
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go b/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go
new file mode 100644
index 0000000..02f07f4
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go
@@ -0,0 +1,58 @@
1package request
2
3import (
4 "io"
5 "sync"
6)
7
8// offsetReader is a thread-safe io.ReadCloser to prevent racing
9// with retrying requests.
10type offsetReader struct {
11 buf io.ReadSeeker
12 lock sync.Mutex
13 closed bool
14}
15
16func newOffsetReader(buf io.ReadSeeker, offset int64) *offsetReader {
17 reader := &offsetReader{}
18 buf.Seek(offset, 0)
19
20 reader.buf = buf
21 return reader
22}
23
24// Close will close the instance of the offset reader's access to
25// the underlying io.ReadSeeker.
26func (o *offsetReader) Close() error {
27 o.lock.Lock()
28 defer o.lock.Unlock()
29 o.closed = true
30 return nil
31}
32
33// Read is a thread-safe read of the underlying io.ReadSeeker
34func (o *offsetReader) Read(p []byte) (int, error) {
35 o.lock.Lock()
36 defer o.lock.Unlock()
37
38 if o.closed {
39 return 0, io.EOF
40 }
41
42 return o.buf.Read(p)
43}
44
45// Seek is a thread-safe seeking operation.
46func (o *offsetReader) Seek(offset int64, whence int) (int64, error) {
47 o.lock.Lock()
48 defer o.lock.Unlock()
49
50 return o.buf.Seek(offset, whence)
51}
52
53// CloseAndCopy closes the old offsetReader and returns a new offsetReader
54// reading from the same underlying buffer at the given offset.
55func (o *offsetReader) CloseAndCopy(offset int64) *offsetReader {
56 o.Close()
57 return newOffsetReader(o.buf, offset)
58}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go
new file mode 100644
index 0000000..4f4f112
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go
@@ -0,0 +1,575 @@
1package request
2
3import (
4 "bytes"
5 "fmt"
6 "io"
7 "net"
8 "net/http"
9 "net/url"
10 "reflect"
11 "strings"
12 "time"
13
14 "github.com/aws/aws-sdk-go/aws"
15 "github.com/aws/aws-sdk-go/aws/awserr"
16 "github.com/aws/aws-sdk-go/aws/client/metadata"
17)
18
19const (
20 // ErrCodeSerialization is the serialization error code that is received
21 // during protocol unmarshaling.
22 ErrCodeSerialization = "SerializationError"
23
24 // ErrCodeRead is an error that is returned during HTTP reads.
25 ErrCodeRead = "ReadError"
26
27 // ErrCodeResponseTimeout is the connection timeout error that is received
28 // during body reads.
29 ErrCodeResponseTimeout = "ResponseTimeout"
30
31 // CanceledErrorCode is the error code that will be returned by an
32 // API request that was canceled. Requests given an aws.Context may
33 // return this error when canceled.
34 CanceledErrorCode = "RequestCanceled"
35)
36
37// A Request is the service request to be made.
38type Request struct {
39 Config aws.Config
40 ClientInfo metadata.ClientInfo
41 Handlers Handlers
42
43 Retryer
44 Time time.Time
45 ExpireTime time.Duration
46 Operation *Operation
47 HTTPRequest *http.Request
48 HTTPResponse *http.Response
49 Body io.ReadSeeker
50 BodyStart int64 // offset from beginning of Body that the request body starts
51 Params interface{}
52 Error error
53 Data interface{}
54 RequestID string
55 RetryCount int
56 Retryable *bool
57 RetryDelay time.Duration
58 NotHoist bool
59 SignedHeaderVals http.Header
60 LastSignedAt time.Time
61 DisableFollowRedirects bool
62
63 context aws.Context
64
65 built bool
66
67 // Need to persist an intermediate body between the input Body and HTTP
68 // request body because the HTTP Client's transport can maintain a reference
69 // to the HTTP request's body after the client has returned. This value is
70 // safe to use concurrently and wrap the input Body for each HTTP request.
71 safeBody *offsetReader
72}
73
74// An Operation is the service API operation to be made.
75type Operation struct {
76 Name string
77 HTTPMethod string
78 HTTPPath string
79 *Paginator
80
81 BeforePresignFn func(r *Request) error
82}
83
84// New returns a new Request pointer for the service API
85// operation and parameters.
86//
87// Params is any value of input parameters to be the request payload.
88// Data is pointer value to an object which the request's response
89// payload will be deserialized to.
90func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers,
91 retryer Retryer, operation *Operation, params interface{}, data interface{}) *Request {
92
93 method := operation.HTTPMethod
94 if method == "" {
95 method = "POST"
96 }
97
98 httpReq, _ := http.NewRequest(method, "", nil)
99
100 var err error
101 httpReq.URL, err = url.Parse(clientInfo.Endpoint + operation.HTTPPath)
102 if err != nil {
103 httpReq.URL = &url.URL{}
104 err = awserr.New("InvalidEndpointURL", "invalid endpoint uri", err)
105 }
106
107 r := &Request{
108 Config: cfg,
109 ClientInfo: clientInfo,
110 Handlers: handlers.Copy(),
111
112 Retryer: retryer,
113 Time: time.Now(),
114 ExpireTime: 0,
115 Operation: operation,
116 HTTPRequest: httpReq,
117 Body: nil,
118 Params: params,
119 Error: err,
120 Data: data,
121 }
122 r.SetBufferBody([]byte{})
123
124 return r
125}
126
127// An Option is a functional option that can augment or modify a request when
128// using a WithContext API operation method.
129type Option func(*Request)
130
131// WithGetResponseHeader builds a request Option which will retrieve a single
132// header value from the HTTP Response. If there are multiple values for the
133// header key use WithGetResponseHeaders instead to access the http.Header
134// map directly. The passed in val pointer must be non-nil.
135//
136// This Option can be used multiple times with a single API operation.
137//
138// var id2, versionID string
139// svc.PutObjectWithContext(ctx, params,
140// request.WithGetResponseHeader("x-amz-id-2", &id2),
141// request.WithGetResponseHeader("x-amz-version-id", &versionID),
142// )
143func WithGetResponseHeader(key string, val *string) Option {
144 return func(r *Request) {
145 r.Handlers.Complete.PushBack(func(req *Request) {
146 *val = req.HTTPResponse.Header.Get(key)
147 })
148 }
149}
150
151// WithGetResponseHeaders builds a request Option which will retrieve the
152// headers from the HTTP response and assign them to the passed in headers
153// variable. The passed in headers pointer must be non-nil.
154//
155// var headers http.Header
156// svc.PutObjectWithContext(ctx, params, request.WithGetResponseHeaders(&headers))
157func WithGetResponseHeaders(headers *http.Header) Option {
158 return func(r *Request) {
159 r.Handlers.Complete.PushBack(func(req *Request) {
160 *headers = req.HTTPResponse.Header
161 })
162 }
163}
164
165// WithLogLevel is a request option that will set the request to use a specific
166// log level when the request is made.
167//
168// svc.PutObjectWithContext(ctx, params, request.WithLogLevel(aws.LogDebugWithHTTPBody)
169func WithLogLevel(l aws.LogLevelType) Option {
170 return func(r *Request) {
171 r.Config.LogLevel = aws.LogLevel(l)
172 }
173}
174
175// ApplyOptions will apply each option to the request calling them in the order
176// the were provided.
177func (r *Request) ApplyOptions(opts ...Option) {
178 for _, opt := range opts {
179 opt(r)
180 }
181}
182
183// Context will always return a non-nil context. If the Request does not have
184// a context, aws.BackgroundContext will be returned.
185func (r *Request) Context() aws.Context {
186 if r.context != nil {
187 return r.context
188 }
189 return aws.BackgroundContext()
190}
191
192// SetContext adds a Context to the current request that can be used to cancel
193// an in-flight request. The Context value must not be nil, or this method will
194// panic.
195//
196// Unlike http.Request.WithContext, SetContext does not return a copy of the
197// Request. It is not safe to use a single Request value for multiple
198// requests. A new Request should be created for each API operation request.
199//
200// Go 1.6 and below:
201// The http.Request's Cancel field will be set to the Done() value of
202// the context. This will overwrite the Cancel field's value.
203//
204// Go 1.7 and above:
205// The http.Request.WithContext will be used to set the context on the underlying
206// http.Request. This will create a shallow copy of the http.Request. The SDK
207// may create sub contexts in the future for nested requests such as retries.
208func (r *Request) SetContext(ctx aws.Context) {
209 if ctx == nil {
210 panic("context cannot be nil")
211 }
212 setRequestContext(r, ctx)
213}
214
215// WillRetry returns whether the request can be retried.
216func (r *Request) WillRetry() bool {
217 return r.Error != nil && aws.BoolValue(r.Retryable) && r.RetryCount < r.MaxRetries()
218}
219
220// ParamsFilled returns whether the request's parameters have been populated
221// and the parameters are valid. False is returned if no parameters are
222// provided or invalid.
223func (r *Request) ParamsFilled() bool {
224 return r.Params != nil && reflect.ValueOf(r.Params).Elem().IsValid()
225}
226
227// DataFilled returns true if the request's data for response deserialization
228// target has been set and is valid. False is returned if data is not
229// set, or is invalid.
230func (r *Request) DataFilled() bool {
231 return r.Data != nil && reflect.ValueOf(r.Data).Elem().IsValid()
232}
233
234// SetBufferBody will set the request's body bytes that will be sent to
235// the service API.
236func (r *Request) SetBufferBody(buf []byte) {
237 r.SetReaderBody(bytes.NewReader(buf))
238}
239
240// SetStringBody sets the body of the request to be backed by a string.
241func (r *Request) SetStringBody(s string) {
242 r.SetReaderBody(strings.NewReader(s))
243}
244
245// SetReaderBody will set the request's body reader.
246func (r *Request) SetReaderBody(reader io.ReadSeeker) {
247 r.Body = reader
248 r.ResetBody()
249}
250
251// Presign returns the request's signed URL. Error will be returned
252// if the signing fails.
253func (r *Request) Presign(expireTime time.Duration) (string, error) {
254 r.ExpireTime = expireTime
255 r.NotHoist = false
256
257 if r.Operation.BeforePresignFn != nil {
258 r = r.copy()
259 err := r.Operation.BeforePresignFn(r)
260 if err != nil {
261 return "", err
262 }
263 }
264
265 r.Sign()
266 if r.Error != nil {
267 return "", r.Error
268 }
269 return r.HTTPRequest.URL.String(), nil
270}
271
272// PresignRequest behaves just like Presign, but signs the request's headers
273// instead of hoisting them. Also returns the signed headers back to the user.
274func (r *Request) PresignRequest(expireTime time.Duration) (string, http.Header, error) {
275 r.ExpireTime = expireTime
276 r.NotHoist = true
277 r.Sign()
278 if r.Error != nil {
279 return "", nil, r.Error
280 }
281 return r.HTTPRequest.URL.String(), r.SignedHeaderVals, nil
282}
283
284func debugLogReqError(r *Request, stage string, retrying bool, err error) {
285 if !r.Config.LogLevel.Matches(aws.LogDebugWithRequestErrors) {
286 return
287 }
288
289 retryStr := "not retrying"
290 if retrying {
291 retryStr = "will retry"
292 }
293
294 r.Config.Logger.Log(fmt.Sprintf("DEBUG: %s %s/%s failed, %s, error %v",
295 stage, r.ClientInfo.ServiceName, r.Operation.Name, retryStr, err))
296}
297
298// Build will build the request's object so it can be signed and sent
299// to the service. Build will also validate all the request's parameters.
300// Any additional build Handlers set on this request will be run
301// in the order they were set.
302//
303// The request will only be built once. Multiple calls to build will have
304// no effect.
305//
306// If any Validate or Build errors occur the build will stop and the error
307// which occurred will be returned.
308func (r *Request) Build() error {
309 if !r.built {
310 r.Handlers.Validate.Run(r)
311 if r.Error != nil {
312 debugLogReqError(r, "Validate Request", false, r.Error)
313 return r.Error
314 }
315 r.Handlers.Build.Run(r)
316 if r.Error != nil {
317 debugLogReqError(r, "Build Request", false, r.Error)
318 return r.Error
319 }
320 r.built = true
321 }
322
323 return r.Error
324}
325
326// Sign will sign the request returning error if errors are encountered.
327//
328// Send will build the request prior to signing. All Sign Handlers will
329// be executed in the order they were set.
330func (r *Request) Sign() error {
331 r.Build()
332 if r.Error != nil {
333 debugLogReqError(r, "Build Request", false, r.Error)
334 return r.Error
335 }
336
337 r.Handlers.Sign.Run(r)
338 return r.Error
339}
340
341// ResetBody rewinds the request body back to its starting position, and
342// sets the HTTP Request body reference. When the body is read prior
343// to being sent in the HTTP request it will need to be rewound.
344func (r *Request) ResetBody() {
345 if r.safeBody != nil {
346 r.safeBody.Close()
347 }
348
349 r.safeBody = newOffsetReader(r.Body, r.BodyStart)
350
351 // Go 1.8 tightened and clarified the rules code needs to use when building
352 // requests with the http package. Go 1.8 removed the automatic detection
353 // of if the Request.Body was empty, or actually had bytes in it. The SDK
354 // always sets the Request.Body even if it is empty and should not actually
355 // be sent. This is incorrect.
356 //
357 // Go 1.8 did add a http.NoBody value that the SDK can use to tell the http
358 // client that the request really should be sent without a body. The
359 // Request.Body cannot be set to nil, which is preferable, because the
360 // field is exported and could introduce nil pointer dereferences for users
361 // of the SDK if they used that field.
362 //
363 // Related golang/go#18257
364 l, err := computeBodyLength(r.Body)
365 if err != nil {
366 r.Error = awserr.New(ErrCodeSerialization, "failed to compute request body size", err)
367 return
368 }
369
370 if l == 0 {
371 r.HTTPRequest.Body = noBodyReader
372 } else if l > 0 {
373 r.HTTPRequest.Body = r.safeBody
374 } else {
375 // Hack to prevent sending bodies for methods where the body
376 // should be ignored by the server. Sending bodies on these
377 // methods without an associated ContentLength will cause the
378 // request to socket timeout because the server does not handle
379 // Transfer-Encoding: chunked bodies for these methods.
380 //
381 // This would only happen if an aws.ReaderSeekerCloser was used with
382 // an io.Reader that was not also an io.Seeker.
383 switch r.Operation.HTTPMethod {
384 case "GET", "HEAD", "DELETE":
385 r.HTTPRequest.Body = noBodyReader
386 default:
387 r.HTTPRequest.Body = r.safeBody
388 }
389 }
390}
391
392// Attempts to compute the length of the body of the reader using the
393// io.Seeker interface. If the value is not seekable because of being
394// a ReaderSeekerCloser without an underlying Seeker, -1 will be returned.
395// If no error occurs the length of the body will be returned.
396func computeBodyLength(r io.ReadSeeker) (int64, error) {
397 seekable := true
398 // Determine if the seeker is actually seekable. ReaderSeekerCloser
399 // hides the fact that an io.Reader might not actually be seekable.
400 switch v := r.(type) {
401 case aws.ReaderSeekerCloser:
402 seekable = v.IsSeeker()
403 case *aws.ReaderSeekerCloser:
404 seekable = v.IsSeeker()
405 }
406 if !seekable {
407 return -1, nil
408 }
409
410 curOffset, err := r.Seek(0, 1)
411 if err != nil {
412 return 0, err
413 }
414
415 endOffset, err := r.Seek(0, 2)
416 if err != nil {
417 return 0, err
418 }
419
420 _, err = r.Seek(curOffset, 0)
421 if err != nil {
422 return 0, err
423 }
424
425 return endOffset - curOffset, nil
426}
427
428// GetBody will return an io.ReadSeeker of the Request's underlying
429// input body with a concurrency safe wrapper.
430func (r *Request) GetBody() io.ReadSeeker {
431 return r.safeBody
432}
433
434// Send will send the request returning error if errors are encountered.
435//
436// Send will sign the request prior to sending. All Send Handlers will
437// be executed in the order they were set.
438//
439// Canceling a request is non-deterministic. If a request has been canceled,
440// then the transport will choose, randomly, one of the state channels during
441// reads or getting the connection.
442//
443// readLoop() and getConn(req *Request, cm connectMethod)
444// https://github.com/golang/go/blob/master/src/net/http/transport.go
445//
446// Send will not close the request.Request's body.
447func (r *Request) Send() error {
448 defer func() {
449 // Regardless of success or failure of the request trigger the Complete
450 // request handlers.
451 r.Handlers.Complete.Run(r)
452 }()
453
454 for {
455 if aws.BoolValue(r.Retryable) {
456 if r.Config.LogLevel.Matches(aws.LogDebugWithRequestRetries) {
457 r.Config.Logger.Log(fmt.Sprintf("DEBUG: Retrying Request %s/%s, attempt %d",
458 r.ClientInfo.ServiceName, r.Operation.Name, r.RetryCount))
459 }
460
461 // The previous http.Request will have a reference to the r.Body
462 // and the HTTP Client's Transport may still be reading from
463 // the request's body even though the Client's Do returned.
464 r.HTTPRequest = copyHTTPRequest(r.HTTPRequest, nil)
465 r.ResetBody()
466
467 // Closing response body to ensure that no response body is leaked
468 // between retry attempts.
469 if r.HTTPResponse != nil && r.HTTPResponse.Body != nil {
470 r.HTTPResponse.Body.Close()
471 }
472 }
473
474 r.Sign()
475 if r.Error != nil {
476 return r.Error
477 }
478
479 r.Retryable = nil
480
481 r.Handlers.Send.Run(r)
482 if r.Error != nil {
483 if !shouldRetryCancel(r) {
484 return r.Error
485 }
486
487 err := r.Error
488 r.Handlers.Retry.Run(r)
489 r.Handlers.AfterRetry.Run(r)
490 if r.Error != nil {
491 debugLogReqError(r, "Send Request", false, r.Error)
492 return r.Error
493 }
494 debugLogReqError(r, "Send Request", true, err)
495 continue
496 }
497 r.Handlers.UnmarshalMeta.Run(r)
498 r.Handlers.ValidateResponse.Run(r)
499 if r.Error != nil {
500 err := r.Error
501 r.Handlers.UnmarshalError.Run(r)
502 r.Handlers.Retry.Run(r)
503 r.Handlers.AfterRetry.Run(r)
504 if r.Error != nil {
505 debugLogReqError(r, "Validate Response", false, r.Error)
506 return r.Error
507 }
508 debugLogReqError(r, "Validate Response", true, err)
509 continue
510 }
511
512 r.Handlers.Unmarshal.Run(r)
513 if r.Error != nil {
514 err := r.Error
515 r.Handlers.Retry.Run(r)
516 r.Handlers.AfterRetry.Run(r)
517 if r.Error != nil {
518 debugLogReqError(r, "Unmarshal Response", false, r.Error)
519 return r.Error
520 }
521 debugLogReqError(r, "Unmarshal Response", true, err)
522 continue
523 }
524
525 break
526 }
527
528 return nil
529}
530
531// copy will copy a request which will allow for local manipulation of the
532// request.
533func (r *Request) copy() *Request {
534 req := &Request{}
535 *req = *r
536 req.Handlers = r.Handlers.Copy()
537 op := *r.Operation
538 req.Operation = &op
539 return req
540}
541
542// AddToUserAgent adds the string to the end of the request's current user agent.
543func AddToUserAgent(r *Request, s string) {
544 curUA := r.HTTPRequest.Header.Get("User-Agent")
545 if len(curUA) > 0 {
546 s = curUA + " " + s
547 }
548 r.HTTPRequest.Header.Set("User-Agent", s)
549}
550
551func shouldRetryCancel(r *Request) bool {
552 awsErr, ok := r.Error.(awserr.Error)
553 timeoutErr := false
554 errStr := r.Error.Error()
555 if ok {
556 if awsErr.Code() == CanceledErrorCode {
557 return false
558 }
559 err := awsErr.OrigErr()
560 netErr, netOK := err.(net.Error)
561 timeoutErr = netOK && netErr.Temporary()
562 if urlErr, ok := err.(*url.Error); !timeoutErr && ok {
563 errStr = urlErr.Err.Error()
564 }
565 }
566
567 // There can be two types of canceled errors here.
568 // The first being a net.Error and the other being an error.
569 // If the request was timed out, we want to continue the retry
570 // process. Otherwise, return the canceled error.
571 return timeoutErr ||
572 (errStr != "net/http: request canceled" &&
573 errStr != "net/http: request canceled while waiting for connection")
574
575}
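
A usage sketch combining the request Options above on a WithContext API call; the bucket and key are placeholders, and it assumes credentials are configured:

    package main

    import (
        "fmt"
        "strings"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/request"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/s3"
    )

    func main() {
        sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
        svc := s3.New(sess)

        var reqID string
        _, err := svc.PutObjectWithContext(aws.BackgroundContext(),
            &s3.PutObjectInput{
                Bucket: aws.String("example-bucket"), // placeholder bucket/key
                Key:    aws.String("example-key"),
                Body:   strings.NewReader("hello"),
            },
            // Capture a response header via the Complete handlers.
            request.WithGetResponseHeader("x-amz-request-id", &reqID),
            // Bump this one call to full HTTP body logging.
            request.WithLogLevel(aws.LogDebugWithHTTPBody),
        )
        fmt.Println("request id:", reqID, "err:", err)
    }
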
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go
new file mode 100644
index 0000000..1323af9
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go
@@ -0,0 +1,21 @@
1// +build !go1.8
2
3package request
4
5import "io"
6
7// NoBody is an io.ReadCloser with no bytes. Read always returns EOF
8// and Close always returns nil. It can be used in an outgoing client
9// request to explicitly signal that a request has zero bytes.
10// An alternative, however, is to simply set Request.Body to nil.
11//
12// Copy of Go 1.8 NoBody type from net/http/http.go
13type noBody struct{}
14
15func (noBody) Read([]byte) (int, error) { return 0, io.EOF }
16func (noBody) Close() error { return nil }
17func (noBody) WriteTo(io.Writer) (int64, error) { return 0, nil }
18
19// noBodyReader is an empty reader that will trigger the Go HTTP client to
20// not include a body in the HTTP request.
21var noBodyReader = noBody{}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go
new file mode 100644
index 0000000..8b963f4
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go
@@ -0,0 +1,9 @@
1// +build go1.8
2
3package request
4
5import "net/http"
6
7// noBodyReader is the http.NoBody reader, instructing the Go HTTP client to
8// not include a body in the HTTP request.
9var noBodyReader = http.NoBody
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go
new file mode 100644
index 0000000..a7365cd
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go
@@ -0,0 +1,14 @@
1// +build go1.7
2
3package request
4
5import "github.com/aws/aws-sdk-go/aws"
6
7// setContext updates the Request to use the passed in context for cancellation.
8// Context will also be used for request retry delay.
9//
10// Creates shallow copy of the http.Request with the WithContext method.
11func setRequestContext(r *Request, ctx aws.Context) {
12 r.context = ctx
13 r.HTTPRequest = r.HTTPRequest.WithContext(ctx)
14}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go
new file mode 100644
index 0000000..307fa07
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go
@@ -0,0 +1,14 @@
1// +build !go1.7
2
3package request
4
5import "github.com/aws/aws-sdk-go/aws"
6
7// setContext updates the Request to use the passed in context for cancellation.
8// Context will also be used for request retry delay.
9//
10// Creates shallow copy of the http.Request with the WithContext method.
11func setRequestContext(r *Request, ctx aws.Context) {
12 r.context = ctx
13 r.HTTPRequest.Cancel = ctx.Done()
14}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go
new file mode 100644
index 0000000..59de673
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go
@@ -0,0 +1,236 @@
1package request
2
3import (
4 "reflect"
5 "sync/atomic"
6
7 "github.com/aws/aws-sdk-go/aws"
8 "github.com/aws/aws-sdk-go/aws/awsutil"
9)
10
11// A Pagination provides pagination of SDK API operations that are paginatable.
12// Generally you should not use this type directly, but use the "Pages" API
13// operations method to automatically perform pagination for you. Such as,
14// "S3.ListObjectsPages", and "S3.ListObjectsPagesWithContext" methods.
15//
16// Pagination differs from a Paginator type in that pagination is the type that
17// does the pagination between API operations, and Paginator defines the
18// configuration that will be used per page request.
19//
20// cont := true
21// for p.Next() && cont {
22// data := p.Page().(*s3.ListObjectsOutput)
23// // process the page's data
24// }
25// return p.Err()
26//
27// See service client API operation Pages methods for examples how the SDK will
28// use the Pagination type.
29type Pagination struct {
30 // Function to return a Request value for each pagination request.
31 // Any configuration or handlers that need to be applied to the request
32 // prior to getting the next page should be done here before the request
33 // is returned.
34 //
35 // NewRequest should always be built from the same API operation. It is
36 // undefined if different API operations are returned on subsequent calls.
37 NewRequest func() (*Request, error)
38
39 started bool
40 nextTokens []interface{}
41
42 err error
43 curPage interface{}
44}
45
46// HasNextPage will return true if Pagination is able to determine that the API
47// operation has additional pages. False will be returned if there are no more
48// pages remaining.
49//
50// Will always return true if Next has not been called yet.
51func (p *Pagination) HasNextPage() bool {
52 return !(p.started && len(p.nextTokens) == 0)
53}
54
55// Err returns the error Pagination encountered when retrieving the next page.
56func (p *Pagination) Err() error {
57 return p.err
58}
59
60// Page returns the current page. Page should only be called after a successful
61// call to Next. It is undefined what Page will return if Page is called after
62// Next returns false.
63func (p *Pagination) Page() interface{} {
64 return p.curPage
65}
66
67// Next will attempt to retrieve the next page for the API operation. When a page
68// is retrieved true will be returned. If the page cannot be retrieved, or there
69// are no more pages false will be returned.
70//
71// Use the Page method to retrieve the current page data. The data will need
72// to be cast to the API operation's output type.
73//
74// Use the Err method to determine if an error occurred if Next returns false.
75func (p *Pagination) Next() bool {
76 if !p.HasNextPage() {
77 return false
78 }
79
80 req, err := p.NewRequest()
81 if err != nil {
82 p.err = err
83 return false
84 }
85
86 if p.started {
87 for i, intok := range req.Operation.InputTokens {
88 awsutil.SetValueAtPath(req.Params, intok, p.nextTokens[i])
89 }
90 }
91 p.started = true
92
93 err = req.Send()
94 if err != nil {
95 p.err = err
96 return false
97 }
98
99 p.nextTokens = req.nextPageTokens()
100 p.curPage = req.Data
101
102 return true
103}
104
105// A Paginator is the configuration data that defines how an API operation
106// should be paginated. This type is used by the API service models to define
107// the generated pagination config for service APIs.
108//
109// The Pagination type is what provides iterating between pages of an API. It
110// is only used to store the token metadata the SDK should use for performing
111// pagination.
112type Paginator struct {
113 InputTokens []string
114 OutputTokens []string
115 LimitToken string
116 TruncationToken string
117}
118
119// nextPageTokens returns the tokens to use when asking for the next page of data.
120func (r *Request) nextPageTokens() []interface{} {
121 if r.Operation.Paginator == nil {
122 return nil
123 }
124 if r.Operation.TruncationToken != "" {
125 tr, _ := awsutil.ValuesAtPath(r.Data, r.Operation.TruncationToken)
126 if len(tr) == 0 {
127 return nil
128 }
129
130 switch v := tr[0].(type) {
131 case *bool:
132 if !aws.BoolValue(v) {
133 return nil
134 }
135 case bool:
136 if !v {
137 return nil
138 }
139 }
140 }
141
142 tokens := []interface{}{}
143 tokenAdded := false
144 for _, outToken := range r.Operation.OutputTokens {
145 v, _ := awsutil.ValuesAtPath(r.Data, outToken)
146 if len(v) > 0 {
147 tokens = append(tokens, v[0])
148 tokenAdded = true
149 } else {
150 tokens = append(tokens, nil)
151 }
152 }
153 if !tokenAdded {
154 return nil
155 }
156
157 return tokens
158}
159
160// Ensure a deprecated item is only logged once instead of each time it's used.
161func logDeprecatedf(logger aws.Logger, flag *int32, msg string) {
162 if logger == nil {
163 return
164 }
165 if atomic.CompareAndSwapInt32(flag, 0, 1) {
166 logger.Log(msg)
167 }
168}
169
170var (
171 logDeprecatedHasNextPage int32
172 logDeprecatedNextPage int32
173 logDeprecatedEachPage int32
174)
175
176// HasNextPage returns true if this request has more pages of data available.
177//
178// Deprecated: Use Pagination type for configurable pagination of API operations
179func (r *Request) HasNextPage() bool {
180 logDeprecatedf(r.Config.Logger, &logDeprecatedHasNextPage,
181 "Request.HasNextPage deprecated. Use Pagination type for configurable pagination of API operations")
182
183 return len(r.nextPageTokens()) > 0
184}
185
186// NextPage returns a new Request that can be executed to return the next
187// page of result data. Call .Send() on this request to execute it.
188//
189// Deprecated: Use Pagination type for configurable pagination of API operations
190func (r *Request) NextPage() *Request {
191 logDeprecatedf(r.Config.Logger, &logDeprecatedNextPage,
192 "Request.NextPage deprecated. Use Pagination type for configurable pagination of API operations")
193
194 tokens := r.nextPageTokens()
195 if len(tokens) == 0 {
196 return nil
197 }
198
199 data := reflect.New(reflect.TypeOf(r.Data).Elem()).Interface()
200 nr := New(r.Config, r.ClientInfo, r.Handlers, r.Retryer, r.Operation, awsutil.CopyOf(r.Params), data)
201 for i, intok := range nr.Operation.InputTokens {
202 awsutil.SetValueAtPath(nr.Params, intok, tokens[i])
203 }
204 return nr
205}
206
207// EachPage iterates over each page of a paginated request object. The fn
208// parameter should be a function with the following sample signature:
209//
210// func(page *T, lastPage bool) bool {
211// return true // return false to stop iterating
212// }
213//
214// Where "T" is the structure type matching the output structure of the given
215// operation. For example, a request object generated by
216// DynamoDB.ListTablesRequest() would expect to see dynamodb.ListTablesOutput
217// as the structure "T". The lastPage value represents whether the page is
218// the last page of data or not. The return value of this function should
219// return true to keep iterating or false to stop.
220//
221// Deprecated: Use Pagination type for configurable pagination of API operations
222func (r *Request) EachPage(fn func(data interface{}, isLastPage bool) (shouldContinue bool)) error {
223 logDeprecatedf(r.Config.Logger, &logDeprecatedEachPage,
224 "Request.EachPage deprecated. Use Pagination type for configurable pagination of API operations")
225
226 for page := r; page != nil; page = page.NextPage() {
227 if err := page.Send(); err != nil {
228 return err
229 }
230 if getNextPage := fn(page.Data, !page.HasNextPage()); !getNextPage {
231 return page.Error
232 }
233 }
234
235 return nil
236}
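
A usage sketch of driving the Pagination type directly, as its doc comment describes; the bucket name is a placeholder:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/request"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/s3"
    )

    func main() {
        sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
        svc := s3.New(sess)

        p := request.Pagination{
            // NewRequest must build a fresh request for every page; the
            // Pagination type injects the input tokens between calls.
            NewRequest: func() (*request.Request, error) {
                req, _ := svc.ListObjectsRequest(&s3.ListObjectsInput{
                    Bucket:  aws.String("example-bucket"), // placeholder
                    MaxKeys: aws.Int64(100),
                })
                return req, nil
            },
        }

        for p.Next() {
            page := p.Page().(*s3.ListObjectsOutput)
            fmt.Println("page with", len(page.Contents), "keys")
        }
        if err := p.Err(); err != nil {
            fmt.Println("paging failed:", err)
        }
    }
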
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go
new file mode 100644
index 0000000..7af81de
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go
@@ -0,0 +1,154 @@
1package request
2
3import (
4 "time"
5
6 "github.com/aws/aws-sdk-go/aws"
7 "github.com/aws/aws-sdk-go/aws/awserr"
8)
9
10// Retryer is an interface to control retry logic for a given service.
11// The default implementation used by most services is the service.DefaultRetryer
12// structure, which contains basic retry logic using exponential backoff.
13type Retryer interface {
14 RetryRules(*Request) time.Duration
15 ShouldRetry(*Request) bool
16 MaxRetries() int
17}
18
19// WithRetryer sets a config Retryer value to the given Config returning it
20// for chaining.
21func WithRetryer(cfg *aws.Config, retryer Retryer) *aws.Config {
22 cfg.Retryer = retryer
23 return cfg
24}
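
// A minimal sketch of a custom Retryer, assuming the common pattern of
// embedding client.DefaultRetryer and overriding only the methods of
// interest; CustomRetryer is a hypothetical name, not part of this package:
//
//	type CustomRetryer struct {
//		client.DefaultRetryer
//	}
//
//	func (d CustomRetryer) MaxRetries() int { return 10 }
//
//	cfg := request.WithRetryer(aws.NewConfig(), CustomRetryer{})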
25
26// retryableCodes is a collection of service response codes which are retry-able
27// without any further action.
28var retryableCodes = map[string]struct{}{
29 "RequestError": {},
30 "RequestTimeout": {},
31 ErrCodeResponseTimeout: {},
32 "RequestTimeoutException": {}, // Glacier's flavor of RequestTimeout
33}
34
35var throttleCodes = map[string]struct{}{
36 "ProvisionedThroughputExceededException": {},
37 "Throttling": {},
38 "ThrottlingException": {},
39 "RequestLimitExceeded": {},
40 "RequestThrottled": {},
41 "LimitExceededException": {}, // Deleting 10+ DynamoDb tables at once
42 "TooManyRequestsException": {}, // Lambda functions
43 "PriorRequestNotComplete": {}, // Route53
44}
45
46// credsExpiredCodes is a collection of error codes which signify the credentials
47// need to be refreshed. Expired tokens require refreshing of credentials, and
48// resigning before the request can be retried.
49var credsExpiredCodes = map[string]struct{}{
50 "ExpiredToken": {},
51 "ExpiredTokenException": {},
52 "RequestExpired": {}, // EC2 Only
53}
54
55func isCodeThrottle(code string) bool {
56 _, ok := throttleCodes[code]
57 return ok
58}
59
60func isCodeRetryable(code string) bool {
61 if _, ok := retryableCodes[code]; ok {
62 return true
63 }
64
65 return isCodeExpiredCreds(code)
66}
67
68func isCodeExpiredCreds(code string) bool {
69 _, ok := credsExpiredCodes[code]
70 return ok
71}
72
73var validParentCodes = map[string]struct{}{
74	ErrCodeSerialization: {},
75	ErrCodeRead:          {},
76}
77
78func isNestedErrorRetryable(parentErr awserr.Error) bool {
79 if parentErr == nil {
80 return false
81 }
82
83 if _, ok := validParentCodes[parentErr.Code()]; !ok {
84 return false
85 }
86
87 err := parentErr.OrigErr()
88 if err == nil {
89 return false
90 }
91
92 if aerr, ok := err.(awserr.Error); ok {
93 return isCodeRetryable(aerr.Code())
94 }
95
96 return isErrConnectionReset(err)
97}
98
99// IsErrorRetryable returns whether the error is retryable, based on its Code.
100// Returns false if error is nil.
101func IsErrorRetryable(err error) bool {
102 if err != nil {
103 if aerr, ok := err.(awserr.Error); ok {
104 return isCodeRetryable(aerr.Code()) || isNestedErrorRetryable(aerr)
105 }
106 }
107 return false
108}
109
110// IsErrorThrottle returns whether the error is to be throttled based on its code.
111// Returns false if error is nil.
112func IsErrorThrottle(err error) bool {
113 if err != nil {
114 if aerr, ok := err.(awserr.Error); ok {
115 return isCodeThrottle(aerr.Code())
116 }
117 }
118 return false
119}
120
121// IsErrorExpiredCreds returns whether the error code is a credential expiry error.
122// Returns false if error is nil.
123func IsErrorExpiredCreds(err error) bool {
124 if err != nil {
125 if aerr, ok := err.(awserr.Error); ok {
126 return isCodeExpiredCreds(aerr.Code())
127 }
128 }
129 return false
130}
131
132// IsErrorRetryable returns whether the error is retryable, based on its Code.
133// Returns false if the request has no Error set.
134//
135// Alias for the utility function IsErrorRetryable
136func (r *Request) IsErrorRetryable() bool {
137 return IsErrorRetryable(r.Error)
138}
139
140// IsErrorThrottle returns whether the error is to be throttled based on its code.
141// Returns false if the request has no Error set.
142//
143// Alias for the utility function IsErrorThrottle
144func (r *Request) IsErrorThrottle() bool {
145 return IsErrorThrottle(r.Error)
146}
147
148// IsErrorExpired returns whether the error code is a credential expiry error.
149// Returns false if the request has no Error set.
150//
151// Alias for the utility function IsErrorExpiredCreds
152func (r *Request) IsErrorExpired() bool {
153 return IsErrorExpiredCreds(r.Error)
154}
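
// An illustrative sketch of how a caller might consult these helpers after a
// failed Send; req is an assumed *request.Request:
//
//	if err := req.Send(); err != nil {
//		switch {
//		case request.IsErrorThrottle(err):
//			// back off more aggressively before retrying
//		case request.IsErrorExpiredCreds(err):
//			// refresh credentials and re-sign before retrying
//		case request.IsErrorRetryable(err):
//			// safe to retry the request as-is
//		}
//	}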
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go b/vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go
new file mode 100644
index 0000000..09a44eb
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go
@@ -0,0 +1,94 @@
1package request
2
3import (
4 "io"
5 "time"
6
7 "github.com/aws/aws-sdk-go/aws/awserr"
8)
9
10var timeoutErr = awserr.New(
11 ErrCodeResponseTimeout,
12 "read on body has reached the timeout limit",
13 nil,
14)
15
16type readResult struct {
17 n int
18 err error
19}
20
21// timeoutReadCloser will handle body reads that take too long.
22// It returns an ErrCodeResponseTimeout error if a timeout occurs.
23type timeoutReadCloser struct {
24 reader io.ReadCloser
25 duration time.Duration
26}
27
28// Read will spin off a goroutine to call the reader's Read method. We will
29// select on the timer's channel or the read's channel. Whichever completes
30// first will be returned.
31func (r *timeoutReadCloser) Read(b []byte) (int, error) {
32 timer := time.NewTimer(r.duration)
33 c := make(chan readResult, 1)
34
35 go func() {
36 n, err := r.reader.Read(b)
37 timer.Stop()
38 c <- readResult{n: n, err: err}
39 }()
40
41 select {
42 case data := <-c:
43 return data.n, data.err
44 case <-timer.C:
45 return 0, timeoutErr
46 }
47}
48
49func (r *timeoutReadCloser) Close() error {
50 return r.reader.Close()
51}
52
53const (
54 // HandlerResponseTimeout is what we use to signify the name of the
55 // response timeout handler.
56 HandlerResponseTimeout = "ResponseTimeoutHandler"
57)
58
59// adaptToResponseTimeoutError is a handler that will replace any top level error
60// with its wrapped error, if the wrapped error's code is ErrCodeResponseTimeout.
61func adaptToResponseTimeoutError(req *Request) {
62 if err, ok := req.Error.(awserr.Error); ok {
63 aerr, ok := err.OrigErr().(awserr.Error)
64 if ok && aerr.Code() == ErrCodeResponseTimeout {
65 req.Error = aerr
66 }
67 }
68}
69
70// WithResponseReadTimeout is a request option that will wrap the body in a timeout read closer.
71// This will allow for per-read timeouts. If a timeout occurs, an error with the
72// ErrCodeResponseTimeout code is returned.
73//
74//	svc.PutObjectWithContext(ctx, params, request.WithResponseReadTimeout(30*time.Second))
75func WithResponseReadTimeout(duration time.Duration) Option {
76 return func(r *Request) {
77
78 var timeoutHandler = NamedHandler{
79 HandlerResponseTimeout,
80 func(req *Request) {
81 req.HTTPResponse.Body = &timeoutReadCloser{
82 reader: req.HTTPResponse.Body,
83 duration: duration,
84 }
85 }}
86
87		// Remove any existing handler so we are not stomping over a previously set duration.
88 r.Handlers.Send.RemoveByName(HandlerResponseTimeout)
89 r.Handlers.Send.PushBackNamed(timeoutHandler)
90
91 r.Handlers.Unmarshal.PushBack(adaptToResponseTimeoutError)
92 r.Handlers.UnmarshalError.PushBack(adaptToResponseTimeoutError)
93 }
94}
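
// A hedged usage sketch applying the per-read timeout to a single request via
// Request.ApplyOptions; svc and params are assumed placeholders:
//
//	req, out := svc.GetObjectRequest(params)
//	req.ApplyOptions(request.WithResponseReadTimeout(30 * time.Second))
//	if err := req.Send(); err == nil {
//		_ = out // each read of the response body was limited to 30s
//	}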
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go b/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go
new file mode 100644
index 0000000..2520286
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go
@@ -0,0 +1,234 @@
1package request
2
3import (
4 "bytes"
5 "fmt"
6
7 "github.com/aws/aws-sdk-go/aws/awserr"
8)
9
10const (
11 // InvalidParameterErrCode is the error code for invalid parameters errors
12 InvalidParameterErrCode = "InvalidParameter"
13 // ParamRequiredErrCode is the error code for required parameter errors
14 ParamRequiredErrCode = "ParamRequiredError"
15 // ParamMinValueErrCode is the error code for fields with too low of a
16 // number value.
17 ParamMinValueErrCode = "ParamMinValueError"
18 // ParamMinLenErrCode is the error code for fields without enough elements.
19 ParamMinLenErrCode = "ParamMinLenError"
20)
21
22// Validator provides a way for types to perform validation logic on their
23// input values that external code can use to determine if a type's values
24// are valid.
25type Validator interface {
26 Validate() error
27}
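
// A minimal sketch of how an input type might implement Validator using the
// ErrInvalidParams helpers below; HypotheticalInput is an assumed example
// type, not part of this package:
//
//	type HypotheticalInput struct {
//		Name *string
//	}
//
//	func (s *HypotheticalInput) Validate() error {
//		invalidParams := request.ErrInvalidParams{Context: "HypotheticalInput"}
//		if s.Name == nil {
//			invalidParams.Add(request.NewErrParamRequired("Name"))
//		}
//		if invalidParams.Len() > 0 {
//			return invalidParams
//		}
//		return nil
//	}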
28
29// An ErrInvalidParams provides wrapping of invalid parameter errors found when
30// validating API operation input parameters.
31type ErrInvalidParams struct {
32 // Context is the base context of the invalid parameter group.
33 Context string
34 errs []ErrInvalidParam
35}
36
37// Add adds a new invalid parameter error to the collection of invalid
38// parameters. The context of the invalid parameter will be updated to reflect
39// this collection.
40func (e *ErrInvalidParams) Add(err ErrInvalidParam) {
41 err.SetContext(e.Context)
42 e.errs = append(e.errs, err)
43}
44
45// AddNested adds the invalid parameter errors from another ErrInvalidParams
46// value into this collection. The nested errors will have their nested and
47// base contexts updated to reflect the merging.
48//
49// Use for nested validation errors.
50func (e *ErrInvalidParams) AddNested(nestedCtx string, nested ErrInvalidParams) {
51 for _, err := range nested.errs {
52 err.SetContext(e.Context)
53 err.AddNestedContext(nestedCtx)
54 e.errs = append(e.errs, err)
55 }
56}
57
58// Len returns the number of invalid parameter errors
59func (e ErrInvalidParams) Len() int {
60 return len(e.errs)
61}
62
63// Code returns the code of the error
64func (e ErrInvalidParams) Code() string {
65 return InvalidParameterErrCode
66}
67
68// Message returns the message of the error
69func (e ErrInvalidParams) Message() string {
70 return fmt.Sprintf("%d validation error(s) found.", len(e.errs))
71}
72
73// Error returns the string formatted form of the invalid parameters.
74func (e ErrInvalidParams) Error() string {
75 w := &bytes.Buffer{}
76 fmt.Fprintf(w, "%s: %s\n", e.Code(), e.Message())
77
78 for _, err := range e.errs {
79 fmt.Fprintf(w, "- %s\n", err.Message())
80 }
81
82 return w.String()
83}
84
85// OrigErr returns the invalid parameters as an awserr.BatchedErrors value
86func (e ErrInvalidParams) OrigErr() error {
87 return awserr.NewBatchError(
88 InvalidParameterErrCode, e.Message(), e.OrigErrs())
89}
90
91// OrigErrs returns a slice of the invalid parameters
92func (e ErrInvalidParams) OrigErrs() []error {
93 errs := make([]error, len(e.errs))
94 for i := 0; i < len(errs); i++ {
95 errs[i] = e.errs[i]
96 }
97
98 return errs
99}
100
101// An ErrInvalidParam represents an invalid parameter error type.
102type ErrInvalidParam interface {
103 awserr.Error
104
105 // Field name the error occurred on.
106 Field() string
107
108 // SetContext updates the context of the error.
109 SetContext(string)
110
111 // AddNestedContext updates the error's context to include a nested level.
112 AddNestedContext(string)
113}
114
115type errInvalidParam struct {
116 context string
117 nestedContext string
118 field string
119 code string
120 msg string
121}
122
123// Code returns the error code for the type of invalid parameter.
124func (e *errInvalidParam) Code() string {
125 return e.code
126}
127
128// Message returns the reason the parameter was invalid, and its context.
129func (e *errInvalidParam) Message() string {
130 return fmt.Sprintf("%s, %s.", e.msg, e.Field())
131}
132
133// Error returns the string version of the invalid parameter error.
134func (e *errInvalidParam) Error() string {
135 return fmt.Sprintf("%s: %s", e.code, e.Message())
136}
137
138// OrigErr returns nil. Implemented to satisfy the awserr.Error interface.
139func (e *errInvalidParam) OrigErr() error {
140 return nil
141}
142
143// Field returns the field and context the error occurred on.
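//
// For example (illustrative values): a context of "OpInput", a nested context
// of "Filter", and a field of "Name" produce "OpInput.Filter.Name".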
144func (e *errInvalidParam) Field() string {
145 field := e.context
146 if len(field) > 0 {
147 field += "."
148 }
149 if len(e.nestedContext) > 0 {
150 field += fmt.Sprintf("%s.", e.nestedContext)
151 }
152 field += e.field
153
154 return field
155}
156
157// SetContext updates the base context of the error.
158func (e *errInvalidParam) SetContext(ctx string) {
159 e.context = ctx
160}
161
162// AddNestedContext prepends a context to the field's path.
163func (e *errInvalidParam) AddNestedContext(ctx string) {
164 if len(e.nestedContext) == 0 {
165 e.nestedContext = ctx
166 } else {
167 e.nestedContext = fmt.Sprintf("%s.%s", ctx, e.nestedContext)
168 }
169
170}
171
172// An ErrParamRequired represents a required parameter error.
173type ErrParamRequired struct {
174 errInvalidParam
175}
176
177// NewErrParamRequired creates a new required parameter error.
178func NewErrParamRequired(field string) *ErrParamRequired {
179 return &ErrParamRequired{
180 errInvalidParam{
181 code: ParamRequiredErrCode,
182 field: field,
183			msg:   "missing required field",
184 },
185 }
186}
187
188// An ErrParamMinValue represents a minimum value parameter error.
189type ErrParamMinValue struct {
190 errInvalidParam
191 min float64
192}
193
194// NewErrParamMinValue creates a new minimum value parameter error.
195func NewErrParamMinValue(field string, min float64) *ErrParamMinValue {
196 return &ErrParamMinValue{
197 errInvalidParam: errInvalidParam{
198 code: ParamMinValueErrCode,
199 field: field,
200 msg: fmt.Sprintf("minimum field value of %v", min),
201 },
202 min: min,
203 }
204}
205
206// MinValue returns the field's required minimum value.
207//
208// float64 is returned for both int and float min values.
209func (e *ErrParamMinValue) MinValue() float64 {
210 return e.min
211}
212
213// An ErrParamMinLen represents a minimum length parameter error.
214type ErrParamMinLen struct {
215 errInvalidParam
216 min int
217}
218
219// NewErrParamMinLen creates a new minimum length parameter error.
220func NewErrParamMinLen(field string, min int) *ErrParamMinLen {
221 return &ErrParamMinLen{
222 errInvalidParam: errInvalidParam{
223			code:  ParamMinLenErrCode,
224 field: field,
225 msg: fmt.Sprintf("minimum field size of %v", min),
226 },
227 min: min,
228 }
229}
230
231// MinLen returns the field's required minimum length.
232func (e *ErrParamMinLen) MinLen() int {
233 return e.min
234}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go b/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go
new file mode 100644
index 0000000..22d2f80
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go
@@ -0,0 +1,287 @@
1package request
2
3import (
4 "fmt"
5 "time"
6
7 "github.com/aws/aws-sdk-go/aws"
8 "github.com/aws/aws-sdk-go/aws/awserr"
9 "github.com/aws/aws-sdk-go/aws/awsutil"
10)
11
12// WaiterResourceNotReadyErrorCode is the error code returned by a waiter when
13// the waiter's max attempts have been exhausted.
14const WaiterResourceNotReadyErrorCode = "ResourceNotReady"
15
16// A WaiterOption is a function that will update the Waiter value's fields to
17// configure the waiter.
18type WaiterOption func(*Waiter)
19
20// WithWaiterMaxAttempts returns a waiter option setting the maximum number of
21// times the waiter should attempt to check the resource for the target state.
22func WithWaiterMaxAttempts(max int) WaiterOption {
23 return func(w *Waiter) {
24 w.MaxAttempts = max
25 }
26}
27
28// WaiterDelay will return a delay the waiter should pause between attempts to
29// check the resource state.
30//
31// Attempt is the number of attempts the Waiter has made checking the resource
32// state.
34type WaiterDelay func(attempt int) time.Duration
35
36// ConstantWaiterDelay returns a WaiterDelay that will always return a constant
37// delay the waiter should use between attempts. It ignores the number of
38// attempts made.
39func ConstantWaiterDelay(delay time.Duration) WaiterDelay {
40 return func(attempt int) time.Duration {
41 return delay
42 }
43}
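
// A hedged sketch of a custom, linearly growing delay built on the same
// WaiterDelay type; illustrative only, with w an assumed Waiter value:
//
//	linear := request.WaiterDelay(func(attempt int) time.Duration {
//		return time.Duration(attempt) * time.Second
//	})
//	w.ApplyOptions(request.WithWaiterDelay(linear))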
44
45// WithWaiterDelay will set the Waiter to use the WaiterDelay passed in.
46func WithWaiterDelay(delayer WaiterDelay) WaiterOption {
47 return func(w *Waiter) {
48 w.Delay = delayer
49 }
50}
51
52// WithWaiterLogger returns a waiter option to set the logger a waiter
53// should use to log warnings and errors to.
54func WithWaiterLogger(logger aws.Logger) WaiterOption {
55 return func(w *Waiter) {
56 w.Logger = logger
57 }
58}
59
60// WithWaiterRequestOptions returns a waiter option setting the request
61// options for each request the waiter makes. Appends to waiter's request
62// options already set.
63func WithWaiterRequestOptions(opts ...Option) WaiterOption {
64 return func(w *Waiter) {
65 w.RequestOptions = append(w.RequestOptions, opts...)
66 }
67}
68
69// A Waiter provides the functionality to perform a blocking call which will
70// wait for a resource state to be satisfied by a service.
71//
72// This type should not be used directly. The API operations provided in the
73// service packages prefixed with "WaitUntil" should be used instead.
74type Waiter struct {
75 Name string
76 Acceptors []WaiterAcceptor
77 Logger aws.Logger
78
79 MaxAttempts int
80 Delay WaiterDelay
81
82 RequestOptions []Option
83 NewRequest func([]Option) (*Request, error)
84}
85
86// ApplyOptions updates the waiter with the list of waiter options provided.
87func (w *Waiter) ApplyOptions(opts ...WaiterOption) {
88 for _, fn := range opts {
89 fn(w)
90 }
91}
92
93// WaiterState is a state the waiter uses, based on WaiterAcceptor definitions,
94// to identify if the resource state the waiter is waiting on has occurred.
95type WaiterState int
96
97// String returns the string representation of the waiter state.
98func (s WaiterState) String() string {
99 switch s {
100 case SuccessWaiterState:
101 return "success"
102 case FailureWaiterState:
103 return "failure"
104 case RetryWaiterState:
105 return "retry"
106 default:
107 return "unknown waiter state"
108 }
109}
110
111// States the waiter acceptors will use to identify target resource states.
112const (
113 SuccessWaiterState WaiterState = iota // waiter successful
114 FailureWaiterState // waiter failed
115 RetryWaiterState // waiter needs to be retried
116)
117
118// WaiterMatchMode is the mode that the waiter will use to match the WaiterAcceptor
119// definition's Expected attribute.
120type WaiterMatchMode int
121
122// Modes the waiter will use when inspecting API responses to identify target
123// resource states.
124const (
125 PathAllWaiterMatch WaiterMatchMode = iota // match on all paths
126 PathWaiterMatch // match on specific path
127 PathAnyWaiterMatch // match on any path
128 PathListWaiterMatch // match on list of paths
129 StatusWaiterMatch // match on status code
130 ErrorWaiterMatch // match on error
131)
132
133// String returns the string representation of the waiter match mode.
134func (m WaiterMatchMode) String() string {
135 switch m {
136 case PathAllWaiterMatch:
137 return "pathAll"
138 case PathWaiterMatch:
139 return "path"
140 case PathAnyWaiterMatch:
141 return "pathAny"
142 case PathListWaiterMatch:
143 return "pathList"
144 case StatusWaiterMatch:
145 return "status"
146 case ErrorWaiterMatch:
147 return "error"
148 default:
149 return "unknown waiter match mode"
150 }
151}
152
153// WaitWithContext will make requests for the API operation using NewRequest to
154// build API requests. The request's response will be compared against the
155// Waiter's Acceptors to determine the successful state of the resource the
156// waiter is inspecting.
157//
158// The passed in context must not be nil. If it is nil a panic will occur. The
159// Context will be used to cancel the waiter's pending requests and retry delays.
160// Use aws.BackgroundContext if no context is available.
161//
162// The waiter will continue until the target state defined by the Acceptors is
163// reached, or the max attempts expire.
164//
165// Will return the WaiterResourceNotReadyErrorCode error code if the waiter's
166// retryer ShouldRetry returns false. This normally will happen when the max
167// wait attempts expires.
168func (w Waiter) WaitWithContext(ctx aws.Context) error {
169
170 for attempt := 1; ; attempt++ {
171 req, err := w.NewRequest(w.RequestOptions)
172 if err != nil {
173 waiterLogf(w.Logger, "unable to create request %v", err)
174 return err
175 }
176 req.Handlers.Build.PushBack(MakeAddToUserAgentFreeFormHandler("Waiter"))
177 err = req.Send()
178
179 // See if any of the acceptors match the request's response, or error
180 for _, a := range w.Acceptors {
181 if matched, matchErr := a.match(w.Name, w.Logger, req, err); matched {
182 return matchErr
183 }
184 }
185
186		// The Waiter should only check the resource state MaxAttempts times.
187		// This is here instead of in the for loop above to prevent delaying
188		// unnecessarily when the waiter will not retry.
189 if attempt == w.MaxAttempts {
190 break
191 }
192
193 // Delay to wait before inspecting the resource again
194 delay := w.Delay(attempt)
195 if sleepFn := req.Config.SleepDelay; sleepFn != nil {
196 // Support SleepDelay for backwards compatibility and testing
197 sleepFn(delay)
198 } else if err := aws.SleepWithContext(ctx, delay); err != nil {
199 return awserr.New(CanceledErrorCode, "waiter context canceled", err)
200 }
201 }
202
203 return awserr.New(WaiterResourceNotReadyErrorCode, "exceeded wait attempts", nil)
204}
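
// An illustrative use of waiter options with a generated "WaitUntil" style
// operation; the S3 client svc and its input are assumed placeholders:
//
//	err := svc.WaitUntilBucketExistsWithContext(aws.BackgroundContext(), input,
//		request.WithWaiterMaxAttempts(10),
//		request.WithWaiterDelay(request.ConstantWaiterDelay(5*time.Second)),
//	)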
205
206// A WaiterAcceptor provides the information needed to wait for an API operation
207// to complete.
208type WaiterAcceptor struct {
209 State WaiterState
210 Matcher WaiterMatchMode
211 Argument string
212 Expected interface{}
213}
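
// An illustrative WaiterAcceptor (values assumed) that treats an HTTP 200
// response as the waiter's success state:
//
//	request.WaiterAcceptor{
//		State:    request.SuccessWaiterState,
//		Matcher:  request.StatusWaiterMatch,
//		Expected: 200,
//	}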
214
215// match returns whether the acceptor found a match with the passed in request
216// or error. True is returned if the acceptor made a match; an error is returned
217// if there was an error attempting to perform the match.
218func (a *WaiterAcceptor) match(name string, l aws.Logger, req *Request, err error) (bool, error) {
219 result := false
220 var vals []interface{}
221
222 switch a.Matcher {
223 case PathAllWaiterMatch, PathWaiterMatch:
224 // Require all matches to be equal for result to match
225 vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument)
226 if len(vals) == 0 {
227 break
228 }
229 result = true
230 for _, val := range vals {
231 if !awsutil.DeepEqual(val, a.Expected) {
232 result = false
233 break
234 }
235 }
236 case PathAnyWaiterMatch:
237 // Only a single match needs to equal for the result to match
238 vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument)
239 for _, val := range vals {
240 if awsutil.DeepEqual(val, a.Expected) {
241 result = true
242 break
243 }
244 }
245 case PathListWaiterMatch:
246 // ignored matcher
247 case StatusWaiterMatch:
248 s := a.Expected.(int)
249 result = s == req.HTTPResponse.StatusCode
250 case ErrorWaiterMatch:
251 if aerr, ok := err.(awserr.Error); ok {
252 result = aerr.Code() == a.Expected.(string)
253 }
254 default:
255 waiterLogf(l, "WARNING: Waiter %s encountered unexpected matcher: %s",
256 name, a.Matcher)
257 }
258
259 if !result {
260 // If there was no matching result found there is nothing more to do
261 // for this response, retry the request.
262 return false, nil
263 }
264
265 switch a.State {
266 case SuccessWaiterState:
267 // waiter completed
268 return true, nil
269 case FailureWaiterState:
270 // Waiter failure state triggered
271 return true, awserr.New(WaiterResourceNotReadyErrorCode,
272 "failed waiting for successful resource state", err)
273 case RetryWaiterState:
274 // clear the error and retry the operation
275 return false, nil
276 default:
277 waiterLogf(l, "WARNING: Waiter %s encountered unexpected state: %s",
278 name, a.State)
279 return false, nil
280 }
281}
282
283func waiterLogf(logger aws.Logger, msg string, args ...interface{}) {
284 if logger != nil {
285 logger.Log(fmt.Sprintf(msg, args...))
286 }
287}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go
new file mode 100644
index 0000000..ea7b886
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go
@@ -0,0 +1,273 @@
1/*
2Package session provides configuration for the SDK's service clients.
3
4Sessions can be shared across all service clients that share the same base
5configuration. The Session is built from the SDK's default configuration and
6request handlers.
7
8Sessions should be cached when possible, because creating a new Session will
9load all configuration values from the environment and config files each time
10the Session is created. Sharing the Session value across all of your service
11clients will ensure the configuration is loaded the fewest number of times possible.
12
13Concurrency
14
15Sessions are safe to use concurrently as long as the Session is not being
16modified. The SDK will not modify the Session once the Session has been created.
17Creating service clients concurrently from a shared Session is safe.
18
19Sessions from Shared Config
20
21Sessions can be created using NewSession, which will only load the
22additional config if the AWS_SDK_LOAD_CONFIG environment variable is set.
23Alternatively you can explicitly create a Session with shared config enabled.
24To do this you can use NewSessionWithOptions to configure how the Session will
25be created. Using the NewSessionWithOptions with SharedConfigState set to
26SharedConfigEnable will create the session as if the AWS_SDK_LOAD_CONFIG
27environment variable was set.
28
29Creating Sessions
30
31When creating Sessions optional aws.Config values can be passed in that will
32override the default, or loaded config values the Session is being created
33with. This allows you to provide additional, or case based, configuration
34as needed.
35
36By default NewSession will only load credentials from the shared credentials
37file (~/.aws/credentials). If the AWS_SDK_LOAD_CONFIG environment variable is
38set to a truthy value the Session will be created from the configuration
39values from the shared config (~/.aws/config) and shared credentials
40(~/.aws/credentials) files. See the section Sessions from Shared Config for
41more information.
42
43Create a Session with the default config and request handlers, with credentials,
44region, and profile loaded from the environment and shared config automatically.
45Requires AWS_PROFILE to be set, or "default" is used.
46
47 // Create Session
48 sess := session.Must(session.NewSession())
49
50 // Create a Session with a custom region
51 sess := session.Must(session.NewSession(&aws.Config{
52 Region: aws.String("us-east-1"),
53 }))
54
55	// Create an S3 client instance from a session
56 sess := session.Must(session.NewSession())
57
58 svc := s3.New(sess)
59
60Create Session With Option Overrides
61
62In addition to NewSession, Sessions can be created using NewSessionWithOptions.
63This func allows you to control and override how the Session will be created
64through code instead of being driven by environment variables only.
65
66Use NewSessionWithOptions when you want to provide the config profile, or
67override the shared config state (AWS_SDK_LOAD_CONFIG).
68
69 // Equivalent to session.NewSession()
70 sess := session.Must(session.NewSessionWithOptions(session.Options{
71 // Options
72 }))
73
74 // Specify profile to load for the session's config
75 sess := session.Must(session.NewSessionWithOptions(session.Options{
76 Profile: "profile_name",
77 }))
78
79 // Specify profile for config and region for requests
80 sess := session.Must(session.NewSessionWithOptions(session.Options{
81 Config: aws.Config{Region: aws.String("us-east-1")},
82 Profile: "profile_name",
83 }))
84
85 // Force enable Shared Config support
86 sess := session.Must(session.NewSessionWithOptions(session.Options{
87 SharedConfigState: session.SharedConfigEnable,
88 }))
89
90Adding Handlers
91
92You can add handlers to a session for processing HTTP requests. All service
93clients that use the session inherit the handlers. For example, the following
94handler logs every request and its payload made by a service client:
95
96 // Create a session, and add additional handlers for all service
97 // clients created with the Session to inherit. Adds logging handler.
98 sess := session.Must(session.NewSession())
99
100 sess.Handlers.Send.PushFront(func(r *request.Request) {
101 // Log every request made and its payload
102	    logger.Printf("Request: %s/%s, Payload: %s",
103 r.ClientInfo.ServiceName, r.Operation, r.Params)
104 })
105
106Deprecated "New" function
107
108The New session function has been deprecated because it does not provide a good
109way to return errors that occur when loading the configuration files and values.
110Because of this, NewSession was created so errors can be retrieved when
111creating a session fails.
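
A hedged sketch of handling that error explicitly with NewSession; the log
package is an assumption made for the example:

	sess, err := session.NewSession()
	if err != nil {
		// Handle the configuration load error here, instead of having
		// every request made with the session fail later.
		log.Fatal(err)
	}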
112
113Shared Config Fields
114
115By default the SDK will only load the shared credentials file's (~/.aws/credentials)
116credentials values, and all other config is provided by the environment variables,
117SDK defaults, and user provided aws.Config values.
118
119If the AWS_SDK_LOAD_CONFIG environment variable is set, or SharedConfigEnable
120option is used to create the Session the full shared config values will be
121loaded. This includes credentials, region, and support for assume role. In
122addition the Session will load its configuration from both the shared config
123file (~/.aws/config) and shared credentials file (~/.aws/credentials). Both
124files have the same format.
125
126If both config files are present the configuration from both files will be
127read. The Session will be created with values from the shared credentials file
128(~/.aws/credentials) taking precedence over those in the shared config file (~/.aws/config).
129
130Credentials are the values the SDK should use for authenticating requests with
131AWS Services. Credentials from a configuration file must include both
132aws_access_key_id and aws_secret_access_key, provided together in the
133same file, to be considered valid. The values will be ignored if not a complete
134group. aws_session_token is an optional field that can be provided if both of
135the other two fields are also provided.
136
137 aws_access_key_id = AKID
138 aws_secret_access_key = SECRET
139 aws_session_token = TOKEN
140
141Assume Role values allow you to configure the SDK to assume an IAM role using
142a set of credentials provided in a config file via the source_profile field.
143Both "role_arn" and "source_profile" are required. The SDK supports assuming
144a role with an MFA token if the session option AssumeRoleTokenProvider
145is set.
146
147 role_arn = arn:aws:iam::<account_number>:role/<role_name>
148 source_profile = profile_with_creds
149 external_id = 1234
150 mfa_serial = <serial or mfa arn>
151 role_session_name = session_name
152
153Region is the region the SDK should use for looking up AWS service endpoints
154and signing requests.
155
156 region = us-east-1
157
158Assume Role with MFA token
159
160To create a session with support for assuming an IAM role with MFA, set the
161session option AssumeRoleTokenProvider to a function that will prompt for the
162MFA token code when the SDK assumes the role and refreshes the role's credentials.
163This allows you to configure the SDK via the shared config to assume a role
164with MFA tokens.
165
166In order for the SDK to assume a role with MFA the SharedConfigState
167session option must be set to SharedConfigEnable, or the AWS_SDK_LOAD_CONFIG
168environment variable must be set.
169
170The shared configuration instructs the SDK to assume an IAM role with MFA
171when the mfa_serial configuration field is set in the shared config
172(~/.aws/config) or shared credentials (~/.aws/credentials) file.
173
174If mfa_serial is set in the configuration but the AssumeRoleTokenProvider
175session option is not set, an error will be returned when creating the
176session.
177
178 sess := session.Must(session.NewSessionWithOptions(session.Options{
179 AssumeRoleTokenProvider: stscreds.StdinTokenProvider,
180 }))
181
182 // Create service client value configured for credentials
183 // from assumed role.
184 svc := s3.New(sess)
185
186To set up assume role outside of a session see the stscreds.AssumeRoleProvider
187documentation.
188
189Environment Variables
190
191When a Session is created several environment variables can be set to adjust
192how the SDK functions, and what configuration data it loads when creating
193Sessions. All environment values are optional, but some values like credentials
194require multiple of the values to set or the partial values will be ignored.
195All environment variable values are strings unless otherwise noted.
196
197Environment configuration values. If set, both Access Key ID and Secret Access
198Key must be provided. Session Token may optionally also be provided, but is
199not required.
200
201 # Access Key ID
202 AWS_ACCESS_KEY_ID=AKID
203 AWS_ACCESS_KEY=AKID # only read if AWS_ACCESS_KEY_ID is not set.
204
205 # Secret Access Key
206 AWS_SECRET_ACCESS_KEY=SECRET
207	AWS_SECRET_KEY=SECRET # only read if AWS_SECRET_ACCESS_KEY is not set.
208
209 # Session Token
210 AWS_SESSION_TOKEN=TOKEN
211
212Region value will instruct the SDK where to make service API requests to. If it
213is not provided in the environment the region must be provided before a service
214client request is made.
215
216 AWS_REGION=us-east-1
217
218 # AWS_DEFAULT_REGION is only read if AWS_SDK_LOAD_CONFIG is also set,
219 # and AWS_REGION is not also set.
220 AWS_DEFAULT_REGION=us-east-1
221
222Profile name the SDK should use when loading shared config from the
223configuration files. If not provided "default" will be used as the profile name.
224
225 AWS_PROFILE=my_profile
226
227 # AWS_DEFAULT_PROFILE is only read if AWS_SDK_LOAD_CONFIG is also set,
228 # and AWS_PROFILE is not also set.
229 AWS_DEFAULT_PROFILE=my_profile
230
231SDK load config instructs the SDK to load the shared config in addition to
232shared credentials. This also expands the configuration loaded so the shared
233credentials will have parity with the shared config file. This also enables
234Region and Profile support for the AWS_DEFAULT_REGION and AWS_DEFAULT_PROFILE
235env values as well.
236
237 AWS_SDK_LOAD_CONFIG=1
238
239Shared credentials file path can be set to instruct the SDK to use an alternative
240file for the shared credentials. If not set the file will be loaded from
241$HOME/.aws/credentials on Linux/Unix based systems, and
242%USERPROFILE%\.aws\credentials on Windows.
243
244 AWS_SHARED_CREDENTIALS_FILE=$HOME/my_shared_credentials
245
246Shared config file path can be set to instruct the SDK to use an alternative
247file for the shared config. If not set the file will be loaded from
248$HOME/.aws/config on Linux/Unix based systems, and
249%USERPROFILE%\.aws\config on Windows.
250
251 AWS_CONFIG_FILE=$HOME/my_shared_config
252
253Path to a custom Certificate Authority (CA) bundle PEM file that the SDK
254will use instead of the system's default root CA bundle. Use this only
255if you want to replace the CA bundle the SDK uses for TLS requests.
256
257 AWS_CA_BUNDLE=$HOME/my_custom_ca_bundle
258
259Enabling this option will attempt to merge the Transport into the SDK's HTTP
260client. If the client's Transport is not a http.Transport an error will be
261returned. If the Transport's TLS config is set this option will cause the SDK
262to overwrite the Transport's TLS config's RootCAs value. If the CA bundle file
263contains multiple certificates all of them will be loaded.
264
265The Session option CustomCABundle is also available when creating sessions
266to enable this feature. The CustomCABundle session option field has priority
267over the AWS_CA_BUNDLE environment variable, and will be used if both are set.
268
269Setting a custom HTTPClient in the aws.Config options will override this setting.
270To use this option and custom HTTP client, the HTTP client needs to be provided
271when creating the session, not the service client.
272*/
273package session
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go
new file mode 100644
index 0000000..e6278a7
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go
@@ -0,0 +1,208 @@
1package session
2
3import (
4 "os"
5 "path/filepath"
6 "strconv"
7
8 "github.com/aws/aws-sdk-go/aws/credentials"
9)
10
11// envConfig is a collection of environment values the SDK will read
12// setup config from. All environment values are optional, but some values
13// such as credentials require multiple values to be complete or the values
14// will be ignored.
15type envConfig struct {
16	// Environment configuration values. If set, both Access Key ID and Secret Access
17	// Key must be provided. Session Token may optionally also be provided, but is
18	// not required.
19 //
20 // # Access Key ID
21 // AWS_ACCESS_KEY_ID=AKID
22 // AWS_ACCESS_KEY=AKID # only read if AWS_ACCESS_KEY_ID is not set.
23 //
24 // # Secret Access Key
25 // AWS_SECRET_ACCESS_KEY=SECRET
26	// AWS_SECRET_KEY=SECRET # only read if AWS_SECRET_ACCESS_KEY is not set.
27 //
28 // # Session Token
29 // AWS_SESSION_TOKEN=TOKEN
30 Creds credentials.Value
31
32	// Region value will instruct the SDK where to make service API requests to. If it
33	// is not provided in the environment the region must be provided before a service
34 // client request is made.
35 //
36 // AWS_REGION=us-east-1
37 //
38 // # AWS_DEFAULT_REGION is only read if AWS_SDK_LOAD_CONFIG is also set,
39 // # and AWS_REGION is not also set.
40 // AWS_DEFAULT_REGION=us-east-1
41 Region string
42
43	// Profile name the SDK should use when loading shared configuration from the
44 // shared configuration files. If not provided "default" will be used as the
45 // profile name.
46 //
47 // AWS_PROFILE=my_profile
48 //
49 // # AWS_DEFAULT_PROFILE is only read if AWS_SDK_LOAD_CONFIG is also set,
50 // # and AWS_PROFILE is not also set.
51 // AWS_DEFAULT_PROFILE=my_profile
52 Profile string
53
54 // SDK load config instructs the SDK to load the shared config in addition to
55 // shared credentials. This also expands the configuration loaded from the shared
56 // credentials to have parity with the shared config file. This also enables
57 // Region and Profile support for the AWS_DEFAULT_REGION and AWS_DEFAULT_PROFILE
58 // env values as well.
59 //
60 // AWS_SDK_LOAD_CONFIG=1
61 EnableSharedConfig bool
62
63 // Shared credentials file path can be set to instruct the SDK to use an alternate
64 // file for the shared credentials. If not set the file will be loaded from
65 // $HOME/.aws/credentials on Linux/Unix based systems, and
66 // %USERPROFILE%\.aws\credentials on Windows.
67 //
68 // AWS_SHARED_CREDENTIALS_FILE=$HOME/my_shared_credentials
69 SharedCredentialsFile string
70
71 // Shared config file path can be set to instruct the SDK to use an alternate
72 // file for the shared config. If not set the file will be loaded from
73 // $HOME/.aws/config on Linux/Unix based systems, and
74 // %USERPROFILE%\.aws\config on Windows.
75 //
76 // AWS_CONFIG_FILE=$HOME/my_shared_config
77 SharedConfigFile string
78
79	// Sets the path to a custom Certificate Authority (CA) Bundle PEM file
80	// that the SDK will use instead of the system's root CA bundle.
81 // Only use this if you want to configure the SDK to use a custom set
82 // of CAs.
83 //
84 // Enabling this option will attempt to merge the Transport
85 // into the SDK's HTTP client. If the client's Transport is
86 // not a http.Transport an error will be returned. If the
87 // Transport's TLS config is set this option will cause the
88 // SDK to overwrite the Transport's TLS config's RootCAs value.
89 //
90 // Setting a custom HTTPClient in the aws.Config options will override this setting.
91 // To use this option and custom HTTP client, the HTTP client needs to be provided
92 // when creating the session. Not the service client.
93 //
94 // AWS_CA_BUNDLE=$HOME/my_custom_ca_bundle
95 CustomCABundle string
96}
97
98var (
99 credAccessEnvKey = []string{
100 "AWS_ACCESS_KEY_ID",
101 "AWS_ACCESS_KEY",
102 }
103 credSecretEnvKey = []string{
104 "AWS_SECRET_ACCESS_KEY",
105 "AWS_SECRET_KEY",
106 }
107 credSessionEnvKey = []string{
108 "AWS_SESSION_TOKEN",
109 }
110
111 regionEnvKeys = []string{
112 "AWS_REGION",
113 "AWS_DEFAULT_REGION", // Only read if AWS_SDK_LOAD_CONFIG is also set
114 }
115 profileEnvKeys = []string{
116 "AWS_PROFILE",
117 "AWS_DEFAULT_PROFILE", // Only read if AWS_SDK_LOAD_CONFIG is also set
118 }
119)
120
121// loadEnvConfig retrieves the SDK's environment configuration.
122// See `envConfig` for the values that will be retrieved.
123//
124// If the environment variable `AWS_SDK_LOAD_CONFIG` is set to a truthy value
125// the shared SDK config will be loaded in addition to the SDK's specific
126// configuration values.
127func loadEnvConfig() envConfig {
128 enableSharedConfig, _ := strconv.ParseBool(os.Getenv("AWS_SDK_LOAD_CONFIG"))
129 return envConfigLoad(enableSharedConfig)
130}
131
132// loadSharedEnvConfig retrieves the SDK's environment configuration, and the
133// SDK shared config. See `envConfig` for the values that will be retrieved.
134//
135// Loads the shared configuration in addition to the SDK's specific configuration.
136// This will load the same values as `loadEnvConfig` if the `AWS_SDK_LOAD_CONFIG`
137// environment variable is set.
138func loadSharedEnvConfig() envConfig {
139 return envConfigLoad(true)
140}
141
142func envConfigLoad(enableSharedConfig bool) envConfig {
143 cfg := envConfig{}
144
145 cfg.EnableSharedConfig = enableSharedConfig
146
147 setFromEnvVal(&cfg.Creds.AccessKeyID, credAccessEnvKey)
148 setFromEnvVal(&cfg.Creds.SecretAccessKey, credSecretEnvKey)
149 setFromEnvVal(&cfg.Creds.SessionToken, credSessionEnvKey)
150
151 // Require logical grouping of credentials
152 if len(cfg.Creds.AccessKeyID) == 0 || len(cfg.Creds.SecretAccessKey) == 0 {
153 cfg.Creds = credentials.Value{}
154 } else {
155 cfg.Creds.ProviderName = "EnvConfigCredentials"
156 }
157
158 regionKeys := regionEnvKeys
159 profileKeys := profileEnvKeys
160 if !cfg.EnableSharedConfig {
161 regionKeys = regionKeys[:1]
162 profileKeys = profileKeys[:1]
163 }
164
165 setFromEnvVal(&cfg.Region, regionKeys)
166 setFromEnvVal(&cfg.Profile, profileKeys)
167
168 cfg.SharedCredentialsFile = sharedCredentialsFilename()
169 cfg.SharedConfigFile = sharedConfigFilename()
170
171 cfg.CustomCABundle = os.Getenv("AWS_CA_BUNDLE")
172
173 return cfg
174}
175
176func setFromEnvVal(dst *string, keys []string) {
177 for _, k := range keys {
178 if v := os.Getenv(k); len(v) > 0 {
179 *dst = v
180 break
181 }
182 }
183}
184
185func sharedCredentialsFilename() string {
186 if name := os.Getenv("AWS_SHARED_CREDENTIALS_FILE"); len(name) > 0 {
187 return name
188 }
189
190 return filepath.Join(userHomeDir(), ".aws", "credentials")
191}
192
193func sharedConfigFilename() string {
194 if name := os.Getenv("AWS_CONFIG_FILE"); len(name) > 0 {
195 return name
196 }
197
198 return filepath.Join(userHomeDir(), ".aws", "config")
199}
200
201func userHomeDir() string {
202 homeDir := os.Getenv("HOME") // *nix
203 if len(homeDir) == 0 { // windows
204 homeDir = os.Getenv("USERPROFILE")
205 }
206
207 return homeDir
208}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go
new file mode 100644
index 0000000..4792d3a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go
@@ -0,0 +1,590 @@
1package session
2
3import (
4 "crypto/tls"
5 "crypto/x509"
6 "fmt"
7 "io"
8 "io/ioutil"
9 "net/http"
10 "os"
11
12 "github.com/aws/aws-sdk-go/aws"
13 "github.com/aws/aws-sdk-go/aws/awserr"
14 "github.com/aws/aws-sdk-go/aws/client"
15 "github.com/aws/aws-sdk-go/aws/corehandlers"
16 "github.com/aws/aws-sdk-go/aws/credentials"
17 "github.com/aws/aws-sdk-go/aws/credentials/stscreds"
18 "github.com/aws/aws-sdk-go/aws/defaults"
19 "github.com/aws/aws-sdk-go/aws/endpoints"
20 "github.com/aws/aws-sdk-go/aws/request"
21)
22
23// A Session provides a central location to create service clients from and
24// store configurations and request handlers for those services.
25//
26// Sessions are safe to create service clients concurrently, but it is not safe
27// to mutate the Session concurrently.
28//
29// The Session satisfies the service client's client.ClientConfigProvider.
30type Session struct {
31 Config *aws.Config
32 Handlers request.Handlers
33}
34
35// New creates a new instance of Session, merging in the provided configs
36// on top of the SDK's default configurations. Once the Session is created it
37// can be mutated to modify the Config or Handlers. The Session is safe to be
38// read concurrently, but it should not be written to concurrently.
39//
40// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value,
41// the New method could now encounter an error when loading the configuration.
42// When the environment variable is set, and an error occurs, New will return a
43// session that will fail all requests reporting the error that occurred while
44// loading the session. Use NewSession to get the error when creating the
45// session.
46//
47// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value
48// the shared config file (~/.aws/config) will also be loaded, in addition to
49// the shared credentials file (~/.aws/credentials). Values set in both the
50// shared config, and shared credentials will be taken from the shared
51// credentials file.
52//
53// Deprecated: Use NewSession functions to create sessions instead. NewSession
54// has the same functionality as New except an error can be returned when the
55// func is called instead of waiting to receive an error until a request is made.
56func New(cfgs ...*aws.Config) *Session {
57 // load initial config from environment
58 envCfg := loadEnvConfig()
59
60 if envCfg.EnableSharedConfig {
61 s, err := newSession(Options{}, envCfg, cfgs...)
62 if err != nil {
63 // Old session.New expected all errors to be discovered when
64 // a request is made, and would report the errors then. This
65 // needs to be replicated if an error occurs while creating
66 // the session.
67 msg := "failed to create session with AWS_SDK_LOAD_CONFIG enabled. " +
68 "Use session.NewSession to handle errors occurring during session creation."
69
70 // Session creation failed, need to report the error and prevent
71 // any requests from succeeding.
72 s = &Session{Config: defaults.Config()}
73 s.Config.MergeIn(cfgs...)
74 s.Config.Logger.Log("ERROR:", msg, "Error:", err)
75 s.Handlers.Validate.PushBack(func(r *request.Request) {
76 r.Error = err
77 })
78 }
79 return s
80 }
81
82 return deprecatedNewSession(cfgs...)
83}
84
85// NewSession returns a new Session created from SDK defaults, config files,
86// environment, and user provided config files. Once the Session is created
87// it can be mutated to modify the Config or Handlers. The Session is safe to
88// be read concurrently, but it should not be written to concurrently.
89//
90// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value
91// the shared config file (~/.aws/config) will also be loaded in addition to
92// the shared credentials file (~/.aws/credentials). Values set in both the
93// shared config, and shared credentials will be taken from the shared
94// credentials file. Enabling the Shared Config will also allow the Session
95// to be built with retrieving credentials with AssumeRole set in the config.
96//
97// See the NewSessionWithOptions func for information on how to override or
98// control through code how the Session will be created. Such as specifying the
99// config profile, and controlling if shared config is enabled or not.
100func NewSession(cfgs ...*aws.Config) (*Session, error) {
101 opts := Options{}
102 opts.Config.MergeIn(cfgs...)
103
104 return NewSessionWithOptions(opts)
105}
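
// A hedged usage sketch; the s3 package and config values are assumptions
// made for the example:
//
//	sess, err := session.NewSession(&aws.Config{
//		Region: aws.String("us-west-2"),
//	})
//	if err != nil {
//		// handle the configuration load error
//	}
//	svc := s3.New(sess)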
106
107// SharedConfigState provides the ability to optionally override the state
108// of the session's creation based on the shared config being enabled or
109// disabled.
110type SharedConfigState int
111
112const (
113 // SharedConfigStateFromEnv does not override any state of the
114 // AWS_SDK_LOAD_CONFIG env var. It is the default value of the
115 // SharedConfigState type.
116 SharedConfigStateFromEnv SharedConfigState = iota
117
118 // SharedConfigDisable overrides the AWS_SDK_LOAD_CONFIG env var value
119 // and disables the shared config functionality.
120 SharedConfigDisable
121
122 // SharedConfigEnable overrides the AWS_SDK_LOAD_CONFIG env var value
123 // and enables the shared config functionality.
124 SharedConfigEnable
125)
126
127// Options provides the means to control how a Session is created and what
128// configuration values will be loaded.
130type Options struct {
131 // Provides config values for the SDK to use when creating service clients
132	// and making API requests to services. Any value set with this field
133 // will override the associated value provided by the SDK defaults,
134 // environment or config files where relevant.
135 //
136	// If not set, configuration values from SDK defaults, environment, and
137	// config files will be used.
138 Config aws.Config
139
140 // Overrides the config profile the Session should be created from. If not
141 // set the value of the environment variable will be loaded (AWS_PROFILE,
142 // or AWS_DEFAULT_PROFILE if the Shared Config is enabled).
143 //
144 // If not set and environment variables are not set the "default"
145 // (DefaultSharedConfigProfile) will be used as the profile to load the
146 // session config from.
147 Profile string
148
149 // Instructs how the Session will be created based on the AWS_SDK_LOAD_CONFIG
150 // environment variable. By default a Session will be created using the
151 // value provided by the AWS_SDK_LOAD_CONFIG environment variable.
152 //
153 // Setting this value to SharedConfigEnable or SharedConfigDisable
154 // will allow you to override the AWS_SDK_LOAD_CONFIG environment variable
155 // and enable or disable the shared config functionality.
156 SharedConfigState SharedConfigState
157
158 // When the SDK's shared config is configured to assume a role with MFA
159 // this option is required in order to provide the mechanism that will
160 // retrieve the MFA token. There is no default value for this field. If
161 // it is not set an error will be returned when creating the session.
162 //
163	// This token provider will be called whenever the assumed role's
164 // credentials need to be refreshed. Within the context of service clients
165 // all sharing the same session the SDK will ensure calls to the token
166 // provider are atomic. When sharing a token provider across multiple
167 // sessions additional synchronization logic is needed to ensure the
168	// token providers do not introduce race conditions. It is recommended to
169 // share the session where possible.
170 //
171 // stscreds.StdinTokenProvider is a basic implementation that will prompt
172 // from stdin for the MFA token code.
173 //
174	// This field is only used if the shared configuration is enabled, and
175	// the config enables assume role with MFA via the mfa_serial field.
176 AssumeRoleTokenProvider func() (string, error)
177
178	// Reader for a custom Certificate Authority (CA) bundle in PEM format that
179 // the SDK will use instead of the default system's root CA bundle. Use this
180 // only if you want to replace the CA bundle the SDK uses for TLS requests.
181 //
182 // Enabling this option will attempt to merge the Transport into the SDK's HTTP
183 // client. If the client's Transport is not a http.Transport an error will be
184 // returned. If the Transport's TLS config is set this option will cause the SDK
185 // to overwrite the Transport's TLS config's RootCAs value. If the CA
186 // bundle reader contains multiple certificates all of them will be loaded.
187 //
188	// The Session option CustomCABundle is also available when creating sessions
189	// to enable this feature. The CustomCABundle session option field has priority
190 // over the AWS_CA_BUNDLE environment variable, and will be used if both are set.
191 CustomCABundle io.Reader
192}
193
194// NewSessionWithOptions returns a new Session created from SDK defaults, config files,
195// environment, and user provided config files. This func uses the Options
196// values to configure how the Session is created.
197//
198// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value
199// the shared config file (~/.aws/config) will also be loaded in addition to
200// the shared credentials file (~/.aws/credentials). Values set in both the
201// shared config, and shared credentials will be taken from the shared
202// credentials file. Enabling the Shared Config will also allow the Session
203// to be built with retrieving credentials with AssumeRole set in the config.
204//
205// // Equivalent to session.New
206// sess := session.Must(session.NewSessionWithOptions(session.Options{}))
207//
208// // Specify profile to load for the session's config
209// sess := session.Must(session.NewSessionWithOptions(session.Options{
210// Profile: "profile_name",
211// }))
212//
213// // Specify profile for config and region for requests
214// sess := session.Must(session.NewSessionWithOptions(session.Options{
215// Config: aws.Config{Region: aws.String("us-east-1")},
216// Profile: "profile_name",
217// }))
218//
219// // Force enable Shared Config support
220// sess := session.Must(session.NewSessionWithOptions(session.Options{
221// SharedConfigState: session.SharedConfigEnable,
222// }))
223func NewSessionWithOptions(opts Options) (*Session, error) {
224 var envCfg envConfig
225 if opts.SharedConfigState == SharedConfigEnable {
226 envCfg = loadSharedEnvConfig()
227 } else {
228 envCfg = loadEnvConfig()
229 }
230
231 if len(opts.Profile) > 0 {
232 envCfg.Profile = opts.Profile
233 }
234
235 switch opts.SharedConfigState {
236 case SharedConfigDisable:
237 envCfg.EnableSharedConfig = false
238 case SharedConfigEnable:
239 envCfg.EnableSharedConfig = true
240 }
241
242 // Only use AWS_CA_BUNDLE if session option is not provided.
243 if len(envCfg.CustomCABundle) != 0 && opts.CustomCABundle == nil {
244 f, err := os.Open(envCfg.CustomCABundle)
245 if err != nil {
246 return nil, awserr.New("LoadCustomCABundleError",
247 "failed to open custom CA bundle PEM file", err)
248 }
249 defer f.Close()
250 opts.CustomCABundle = f
251 }
252
253 return newSession(opts, envCfg, &opts.Config)
254}
255
256// Must is a helper function to ensure the Session is valid and there was no
257// error when calling a NewSession function.
258//
259// This helper is intended to be used in variable initialization to load the
260// Session and configuration at startup. Such as:
261//
262// var sess = session.Must(session.NewSession())
263func Must(sess *Session, err error) *Session {
264 if err != nil {
265 panic(err)
266 }
267
268 return sess
269}
270
271func deprecatedNewSession(cfgs ...*aws.Config) *Session {
272 cfg := defaults.Config()
273 handlers := defaults.Handlers()
274
275 // Apply the passed in configs so the configuration can be applied to the
276 // default credential chain
277 cfg.MergeIn(cfgs...)
278 if cfg.EndpointResolver == nil {
279 // An endpoint resolver is required for a session to be able to provide
280 // endpoints for service client configurations.
281 cfg.EndpointResolver = endpoints.DefaultResolver()
282 }
283 cfg.Credentials = defaults.CredChain(cfg, handlers)
284
285 // Reapply any passed in configs to override credentials if set
286 cfg.MergeIn(cfgs...)
287
288 s := &Session{
289 Config: cfg,
290 Handlers: handlers,
291 }
292
293 initHandlers(s)
294
295 return s
296}
297
298func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session, error) {
299 cfg := defaults.Config()
300 handlers := defaults.Handlers()
301
302	// Get a merged version of the user provided config to determine if
303	// credentials were set.
304 userCfg := &aws.Config{}
305 userCfg.MergeIn(cfgs...)
306
307	// Config files will be loaded in order, with later files overwriting
308	// previous config file values.
309 cfgFiles := []string{envCfg.SharedConfigFile, envCfg.SharedCredentialsFile}
310 if !envCfg.EnableSharedConfig {
311 // The shared config file (~/.aws/config) is only loaded if instructed
312 // to load via the envConfig.EnableSharedConfig (AWS_SDK_LOAD_CONFIG).
313 cfgFiles = cfgFiles[1:]
314 }
315
316 // Load additional config from file(s)
317 sharedCfg, err := loadSharedConfig(envCfg.Profile, cfgFiles)
318 if err != nil {
319 return nil, err
320 }
321
322 if err := mergeConfigSrcs(cfg, userCfg, envCfg, sharedCfg, handlers, opts); err != nil {
323 return nil, err
324 }
325
326 s := &Session{
327 Config: cfg,
328 Handlers: handlers,
329 }
330
331 initHandlers(s)
332
333 // Setup HTTP client with custom cert bundle if enabled
334 if opts.CustomCABundle != nil {
335 if err := loadCustomCABundle(s, opts.CustomCABundle); err != nil {
336 return nil, err
337 }
338 }
339
340 return s, nil
341}
342
343func loadCustomCABundle(s *Session, bundle io.Reader) error {
344 var t *http.Transport
345 switch v := s.Config.HTTPClient.Transport.(type) {
346 case *http.Transport:
347 t = v
348 default:
349 if s.Config.HTTPClient.Transport != nil {
350 return awserr.New("LoadCustomCABundleError",
351 "unable to load custom CA bundle, HTTPClient's transport unsupported type", nil)
352 }
353 }
354 if t == nil {
355 t = &http.Transport{}
356 }
357
358 p, err := loadCertPool(bundle)
359 if err != nil {
360 return err
361 }
362 if t.TLSClientConfig == nil {
363 t.TLSClientConfig = &tls.Config{}
364 }
365 t.TLSClientConfig.RootCAs = p
366
367 s.Config.HTTPClient.Transport = t
368
369 return nil
370}
371
372func loadCertPool(r io.Reader) (*x509.CertPool, error) {
373 b, err := ioutil.ReadAll(r)
374 if err != nil {
375 return nil, awserr.New("LoadCustomCABundleError",
376 "failed to read custom CA bundle PEM file", err)
377 }
378
379 p := x509.NewCertPool()
380 if !p.AppendCertsFromPEM(b) {
381 return nil, awserr.New("LoadCustomCABundleError",
382 "failed to load custom CA bundle PEM file", err)
383 }
384
385 return p, nil
386}
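
// For illustration, a minimal sketch of supplying a custom CA bundle through
// the session options (the PEM file path below is a hypothetical example):
//
//	f, err := os.Open("/etc/pki/custom-ca-bundle.pem")
//	if err != nil {
//	    panic(err)
//	}
//	defer f.Close()
//
//	sess := session.Must(session.NewSessionWithOptions(session.Options{
//	    CustomCABundle: f,
//	}))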
387
388func mergeConfigSrcs(cfg, userCfg *aws.Config, envCfg envConfig, sharedCfg sharedConfig, handlers request.Handlers, sessOpts Options) error {
389 // Merge in user provided configuration
390 cfg.MergeIn(userCfg)
391
392 // Region if not already set by user
393 if len(aws.StringValue(cfg.Region)) == 0 {
394 if len(envCfg.Region) > 0 {
395 cfg.WithRegion(envCfg.Region)
396 } else if envCfg.EnableSharedConfig && len(sharedCfg.Region) > 0 {
397 cfg.WithRegion(sharedCfg.Region)
398 }
399 }
400
401 // Configure credentials if not already set
402 if cfg.Credentials == credentials.AnonymousCredentials && userCfg.Credentials == nil {
403 if len(envCfg.Creds.AccessKeyID) > 0 {
404 cfg.Credentials = credentials.NewStaticCredentialsFromCreds(
405 envCfg.Creds,
406 )
407 } else if envCfg.EnableSharedConfig && len(sharedCfg.AssumeRole.RoleARN) > 0 && sharedCfg.AssumeRoleSource != nil {
408 cfgCp := *cfg
409 cfgCp.Credentials = credentials.NewStaticCredentialsFromCreds(
410 sharedCfg.AssumeRoleSource.Creds,
411 )
412 if len(sharedCfg.AssumeRole.MFASerial) > 0 && sessOpts.AssumeRoleTokenProvider == nil {
413 // AssumeRole Token provider is required if doing Assume Role
414 // with MFA.
415 return AssumeRoleTokenProviderNotSetError{}
416 }
417 cfg.Credentials = stscreds.NewCredentials(
418 &Session{
419 Config: &cfgCp,
420 Handlers: handlers.Copy(),
421 },
422 sharedCfg.AssumeRole.RoleARN,
423 func(opt *stscreds.AssumeRoleProvider) {
424 opt.RoleSessionName = sharedCfg.AssumeRole.RoleSessionName
425
426 // Assume role with external ID
427 if len(sharedCfg.AssumeRole.ExternalID) > 0 {
428 opt.ExternalID = aws.String(sharedCfg.AssumeRole.ExternalID)
429 }
430
431 // Assume role with MFA
432 if len(sharedCfg.AssumeRole.MFASerial) > 0 {
433 opt.SerialNumber = aws.String(sharedCfg.AssumeRole.MFASerial)
434 opt.TokenProvider = sessOpts.AssumeRoleTokenProvider
435 }
436 },
437 )
438 } else if len(sharedCfg.Creds.AccessKeyID) > 0 {
439 cfg.Credentials = credentials.NewStaticCredentialsFromCreds(
440 sharedCfg.Creds,
441 )
442 } else {
443 // Fallback to default credentials provider, include mock errors
444			// for the credential chain so the user can identify why credentials
445 // failed to be retrieved.
446 cfg.Credentials = credentials.NewCredentials(&credentials.ChainProvider{
447 VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors),
448 Providers: []credentials.Provider{
449 &credProviderError{Err: awserr.New("EnvAccessKeyNotFound", "failed to find credentials in the environment.", nil)},
450 &credProviderError{Err: awserr.New("SharedCredsLoad", fmt.Sprintf("failed to load profile, %s.", envCfg.Profile), nil)},
451 defaults.RemoteCredProvider(*cfg, handlers),
452 },
453 })
454 }
455 }
456
457 return nil
458}
459
460// AssumeRoleTokenProviderNotSetError is an error returned when creating a
461// session when the AssumeRoleTokenProvider option is not set and the shared
462// config is configured to assume a role with an MFA token.
463type AssumeRoleTokenProviderNotSetError struct{}
464
465// Code is the short id of the error.
466func (e AssumeRoleTokenProviderNotSetError) Code() string {
467 return "AssumeRoleTokenProviderNotSetError"
468}
469
470// Message is the description of the error
471func (e AssumeRoleTokenProviderNotSetError) Message() string {
472	return "assume role with MFA enabled, but AssumeRoleTokenProvider session option not set."
473}
474
475// OrigErr is the underlying error that caused the failure.
476func (e AssumeRoleTokenProviderNotSetError) OrigErr() error {
477 return nil
478}
479
480// Error satisfies the error interface.
481func (e AssumeRoleTokenProviderNotSetError) Error() string {
482 return awserr.SprintError(e.Code(), e.Message(), "", nil)
483}
484
485type credProviderError struct {
486 Err error
487}
488
489var emptyCreds = credentials.Value{}
490
491func (c credProviderError) Retrieve() (credentials.Value, error) {
492 return credentials.Value{}, c.Err
493}
494func (c credProviderError) IsExpired() bool {
495 return true
496}
497
498func initHandlers(s *Session) {
499 // Add the Validate parameter handler if it is not disabled.
500 s.Handlers.Validate.Remove(corehandlers.ValidateParametersHandler)
501 if !aws.BoolValue(s.Config.DisableParamValidation) {
502 s.Handlers.Validate.PushBackNamed(corehandlers.ValidateParametersHandler)
503 }
504}
505
506// Copy creates and returns a copy of the current Session, copying the config
507// and handlers. If any additional configs are provided they will be merged
508// on top of the Session's copied config.
509//
510// // Create a copy of the current Session, configured for the us-west-2 region.
511// sess.Copy(&aws.Config{Region: aws.String("us-west-2")})
512func (s *Session) Copy(cfgs ...*aws.Config) *Session {
513 newSession := &Session{
514 Config: s.Config.Copy(cfgs...),
515 Handlers: s.Handlers.Copy(),
516 }
517
518 initHandlers(newSession)
519
520 return newSession
521}
522
523// ClientConfig satisfies the client.ConfigProvider interface and is used to
524// configure the service client instances. Passing the Session to the service
525// client's constructor (New) will use this method to configure the client.
526func (s *Session) ClientConfig(serviceName string, cfgs ...*aws.Config) client.Config {
527	// Backwards compatibility: the error will be eaten if the user calls ClientConfig
528	// directly. All SDK services will use clientConfigWithErr.
529 cfg, _ := s.clientConfigWithErr(serviceName, cfgs...)
530
531 return cfg
532}
533
534func (s *Session) clientConfigWithErr(serviceName string, cfgs ...*aws.Config) (client.Config, error) {
535 s = s.Copy(cfgs...)
536
537 var resolved endpoints.ResolvedEndpoint
538 var err error
539
540 region := aws.StringValue(s.Config.Region)
541
542 if endpoint := aws.StringValue(s.Config.Endpoint); len(endpoint) != 0 {
543 resolved.URL = endpoints.AddScheme(endpoint, aws.BoolValue(s.Config.DisableSSL))
544 resolved.SigningRegion = region
545 } else {
546 resolved, err = s.Config.EndpointResolver.EndpointFor(
547 serviceName, region,
548 func(opt *endpoints.Options) {
549 opt.DisableSSL = aws.BoolValue(s.Config.DisableSSL)
550 opt.UseDualStack = aws.BoolValue(s.Config.UseDualStack)
551
552 // Support the condition where the service is modeled but its
553 // endpoint metadata is not available.
554 opt.ResolveUnknownService = true
555 },
556 )
557 }
558
559 return client.Config{
560 Config: s.Config,
561 Handlers: s.Handlers,
562 Endpoint: resolved.URL,
563 SigningRegion: resolved.SigningRegion,
564 SigningName: resolved.SigningName,
565 }, err
566}
567
568// ClientConfigNoResolveEndpoint is the same as ClientConfig with the exception
569// that the EndpointResolver will not be used to resolve the endpoint. The only
570// endpoint set must come from the aws.Config.Endpoint field.
571func (s *Session) ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) client.Config {
572 s = s.Copy(cfgs...)
573
574 var resolved endpoints.ResolvedEndpoint
575
576 region := aws.StringValue(s.Config.Region)
577
578 if ep := aws.StringValue(s.Config.Endpoint); len(ep) > 0 {
579 resolved.URL = endpoints.AddScheme(ep, aws.BoolValue(s.Config.DisableSSL))
580 resolved.SigningRegion = region
581 }
582
583 return client.Config{
584 Config: s.Config,
585 Handlers: s.Handlers,
586 Endpoint: resolved.URL,
587 SigningRegion: resolved.SigningRegion,
588 SigningName: resolved.SigningName,
589 }
590}
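
// An illustrative sketch of inspecting the client configuration a session
// resolves for a service ("s3" is just an example service name):
//
//	sess := session.Must(session.NewSession(&aws.Config{
//	    Region: aws.String("us-west-2"),
//	}))
//	cfg := sess.ClientConfig("s3")
//	fmt.Println(cfg.Endpoint, cfg.SigningRegion)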
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go
new file mode 100644
index 0000000..b58076f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go
@@ -0,0 +1,295 @@
1package session
2
3import (
4 "fmt"
5 "io/ioutil"
6
7 "github.com/aws/aws-sdk-go/aws/awserr"
8 "github.com/aws/aws-sdk-go/aws/credentials"
9 "github.com/go-ini/ini"
10)
11
12const (
13 // Static Credentials group
14 accessKeyIDKey = `aws_access_key_id` // group required
15 secretAccessKey = `aws_secret_access_key` // group required
16 sessionTokenKey = `aws_session_token` // optional
17
18 // Assume Role Credentials group
19 roleArnKey = `role_arn` // group required
20 sourceProfileKey = `source_profile` // group required
21 externalIDKey = `external_id` // optional
22 mfaSerialKey = `mfa_serial` // optional
23 roleSessionNameKey = `role_session_name` // optional
24
25 // Additional Config fields
26 regionKey = `region`
27
28 // DefaultSharedConfigProfile is the default profile to be used when
29 // loading configuration from the config files if another profile name
30 // is not provided.
31 DefaultSharedConfigProfile = `default`
32)
33
34type assumeRoleConfig struct {
35 RoleARN string
36 SourceProfile string
37 ExternalID string
38 MFASerial string
39 RoleSessionName string
40}
41
42// sharedConfig represents the configuration fields of the SDK config files.
43type sharedConfig struct {
44 // Credentials values from the config file. Both aws_access_key_id
45 // and aws_secret_access_key must be provided together in the same file
46 // to be considered valid. The values will be ignored if not a complete group.
47 // aws_session_token is an optional field that can be provided if both of the
48 // other two fields are also provided.
49 //
50 // aws_access_key_id
51 // aws_secret_access_key
52 // aws_session_token
53 Creds credentials.Value
54
55 AssumeRole assumeRoleConfig
56 AssumeRoleSource *sharedConfig
57
58 // Region is the region the SDK should use for looking up AWS service endpoints
59 // and signing requests.
60 //
61 // region
62 Region string
63}
64
65type sharedConfigFile struct {
66 Filename string
67 IniData *ini.File
68}
69
70// loadSharedConfig retrieves the configuration from the list of files
71// using the profile provided. The order the files are listed will determine
72// precedence. Values in subsequent files will overwrite values defined in
73// earlier files.
74//
75// For example, given two files A and B. Both define credentials. If the order
76// of the files are A then B, B's credential values will be used instead of A's.
77//
78// See sharedConfig.setFromIniFile for information on how the config files
79// will be loaded.
80func loadSharedConfig(profile string, filenames []string) (sharedConfig, error) {
81 if len(profile) == 0 {
82 profile = DefaultSharedConfigProfile
83 }
84
85 files, err := loadSharedConfigIniFiles(filenames)
86 if err != nil {
87 return sharedConfig{}, err
88 }
89
90 cfg := sharedConfig{}
91 if err = cfg.setFromIniFiles(profile, files); err != nil {
92 return sharedConfig{}, err
93 }
94
95 if len(cfg.AssumeRole.SourceProfile) > 0 {
96 if err := cfg.setAssumeRoleSource(profile, files); err != nil {
97 return sharedConfig{}, err
98 }
99 }
100
101 return cfg, nil
102}
103
104func loadSharedConfigIniFiles(filenames []string) ([]sharedConfigFile, error) {
105 files := make([]sharedConfigFile, 0, len(filenames))
106
107 for _, filename := range filenames {
108 b, err := ioutil.ReadFile(filename)
109 if err != nil {
110 // Skip files which can't be opened and read for whatever reason
111 continue
112 }
113
114 f, err := ini.Load(b)
115 if err != nil {
116 return nil, SharedConfigLoadError{Filename: filename}
117 }
118
119 files = append(files, sharedConfigFile{
120 Filename: filename, IniData: f,
121 })
122 }
123
124 return files, nil
125}
126
127func (cfg *sharedConfig) setAssumeRoleSource(origProfile string, files []sharedConfigFile) error {
128 var assumeRoleSrc sharedConfig
129
130	// Multiple level assume role chains are not supported.
131 if cfg.AssumeRole.SourceProfile == origProfile {
132 assumeRoleSrc = *cfg
133 assumeRoleSrc.AssumeRole = assumeRoleConfig{}
134 } else {
135 err := assumeRoleSrc.setFromIniFiles(cfg.AssumeRole.SourceProfile, files)
136 if err != nil {
137 return err
138 }
139 }
140
141 if len(assumeRoleSrc.Creds.AccessKeyID) == 0 {
142 return SharedConfigAssumeRoleError{RoleARN: cfg.AssumeRole.RoleARN}
143 }
144
145 cfg.AssumeRoleSource = &assumeRoleSrc
146
147 return nil
148}
149
150func (cfg *sharedConfig) setFromIniFiles(profile string, files []sharedConfigFile) error {
151 // Trim files from the list that don't exist.
152 for _, f := range files {
153 if err := cfg.setFromIniFile(profile, f); err != nil {
154 if _, ok := err.(SharedConfigProfileNotExistsError); ok {
155				// Ignore profiles that are missing.
156 continue
157 }
158 return err
159 }
160 }
161
162 return nil
163}
164
165// setFromIniFile loads the configuration from the file using
166// the profile provided. A sharedConfig pointer type value is used so that
167// multiple config file loadings can be chained.
168//
169// Only loads complete logically grouped values, and will not set fields in cfg
170// for incomplete grouped values in the config, such as credentials. For example,
171// if a config file only includes aws_access_key_id but no aws_secret_access_key
172// the aws_access_key_id will be ignored.
173func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile) error {
174 section, err := file.IniData.GetSection(profile)
175 if err != nil {
176		// Fall back to the alternate profile name: profile <name>
177 section, err = file.IniData.GetSection(fmt.Sprintf("profile %s", profile))
178 if err != nil {
179 return SharedConfigProfileNotExistsError{Profile: profile, Err: err}
180 }
181 }
182
183 // Shared Credentials
184 akid := section.Key(accessKeyIDKey).String()
185 secret := section.Key(secretAccessKey).String()
186 if len(akid) > 0 && len(secret) > 0 {
187 cfg.Creds = credentials.Value{
188 AccessKeyID: akid,
189 SecretAccessKey: secret,
190 SessionToken: section.Key(sessionTokenKey).String(),
191 ProviderName: fmt.Sprintf("SharedConfigCredentials: %s", file.Filename),
192 }
193 }
194
195 // Assume Role
196 roleArn := section.Key(roleArnKey).String()
197 srcProfile := section.Key(sourceProfileKey).String()
198 if len(roleArn) > 0 && len(srcProfile) > 0 {
199 cfg.AssumeRole = assumeRoleConfig{
200 RoleARN: roleArn,
201 SourceProfile: srcProfile,
202 ExternalID: section.Key(externalIDKey).String(),
203 MFASerial: section.Key(mfaSerialKey).String(),
204 RoleSessionName: section.Key(roleSessionNameKey).String(),
205 }
206 }
207
208 // Region
209 if v := section.Key(regionKey).String(); len(v) > 0 {
210 cfg.Region = v
211 }
212
213 return nil
214}
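
// For reference, a hypothetical shared config file exercising the keys
// parsed above (all names and ARNs are placeholders):
//
//	[default]
//	region = us-west-2
//	aws_access_key_id = AKID
//	aws_secret_access_key = SECRET
//
//	[profile assume-example]
//	role_arn = arn:aws:iam::123456789012:role/example
//	source_profile = default
//	mfa_serial = arn:aws:iam::123456789012:mfa/user
//	role_session_name = example-session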
215
216// SharedConfigLoadError is an error for when the shared config file failed to load.
217type SharedConfigLoadError struct {
218 Filename string
219 Err error
220}
221
222// Code is the short id of the error.
223func (e SharedConfigLoadError) Code() string {
224 return "SharedConfigLoadError"
225}
226
227// Message is the description of the error
228func (e SharedConfigLoadError) Message() string {
229 return fmt.Sprintf("failed to load config file, %s", e.Filename)
230}
231
232// OrigErr is the underlying error that caused the failure.
233func (e SharedConfigLoadError) OrigErr() error {
234 return e.Err
235}
236
237// Error satisfies the error interface.
238func (e SharedConfigLoadError) Error() string {
239 return awserr.SprintError(e.Code(), e.Message(), "", e.Err)
240}
241
242// SharedConfigProfileNotExistsError is an error for the shared config when
243// the profile was not found in the config file.
244type SharedConfigProfileNotExistsError struct {
245 Profile string
246 Err error
247}
248
249// Code is the short id of the error.
250func (e SharedConfigProfileNotExistsError) Code() string {
251 return "SharedConfigProfileNotExistsError"
252}
253
254// Message is the description of the error
255func (e SharedConfigProfileNotExistsError) Message() string {
256 return fmt.Sprintf("failed to get profile, %s", e.Profile)
257}
258
259// OrigErr is the underlying error that caused the failure.
260func (e SharedConfigProfileNotExistsError) OrigErr() error {
261 return e.Err
262}
263
264// Error satisfies the error interface.
265func (e SharedConfigProfileNotExistsError) Error() string {
266 return awserr.SprintError(e.Code(), e.Message(), "", e.Err)
267}
268
269// SharedConfigAssumeRoleError is an error for the shared config when the
270// profile contains assume role information, but that information is invalid
271// or not complete.
272type SharedConfigAssumeRoleError struct {
273 RoleARN string
274}
275
276// Code is the short id of the error.
277func (e SharedConfigAssumeRoleError) Code() string {
278 return "SharedConfigAssumeRoleError"
279}
280
281// Message is the description of the error
282func (e SharedConfigAssumeRoleError) Message() string {
283 return fmt.Sprintf("failed to load assume role for %s, source profile has no shared credentials",
284 e.RoleARN)
285}
286
287// OrigErr is the underlying error that caused the failure.
288func (e SharedConfigAssumeRoleError) OrigErr() error {
289 return nil
290}
291
292// Error satisfies the error interface.
293func (e SharedConfigAssumeRoleError) Error() string {
294 return awserr.SprintError(e.Code(), e.Message(), "", nil)
295}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go
new file mode 100644
index 0000000..244c86d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go
@@ -0,0 +1,82 @@
1package v4
2
3import (
4 "net/http"
5 "strings"
6)
7
8// rules houses the set of rule values needed for validation of a
9// string value.
10type rules []rule
11
12// rule interface allows for more flexible rules and simply
13// checks whether or not a value adheres to that rule.
14type rule interface {
15 IsValid(value string) bool
16}
17
18// IsValid will iterate through all rules and return true if any rule
19// applies to the value. Nested rules are supported.
20func (r rules) IsValid(value string) bool {
21 for _, rule := range r {
22 if rule.IsValid(value) {
23 return true
24 }
25 }
26 return false
27}
28
29// mapRule generic rule for maps
30type mapRule map[string]struct{}
31
32// IsValid for the map rule satisfies whether it exists in the map
33func (m mapRule) IsValid(value string) bool {
34 _, ok := m[value]
35 return ok
36}
37
38// whitelist is a generic rule for whitelisting
39type whitelist struct {
40 rule
41}
42
43// IsValid for whitelist checks if the value is within the whitelist
44func (w whitelist) IsValid(value string) bool {
45 return w.rule.IsValid(value)
46}
47
48// blacklist is a generic rule for blacklisting
49type blacklist struct {
50 rule
51}
52
53// IsValid for blacklist checks if the value is not within the blacklist
54func (b blacklist) IsValid(value string) bool {
55 return !b.rule.IsValid(value)
56}
57
58type patterns []string
59
60// IsValid for patterns checks each pattern and returns whether a match
61// has been found.
62func (p patterns) IsValid(value string) bool {
63 for _, pattern := range p {
64 if strings.HasPrefix(http.CanonicalHeaderKey(value), pattern) {
65 return true
66 }
67 }
68 return false
69}
70
71// inclusiveRules allows rules to depend on one another
72type inclusiveRules []rule
73
74// IsValid will return true if all rules are true
75func (r inclusiveRules) IsValid(value string) bool {
76 for _, rule := range r {
77 if !rule.IsValid(value) {
78 return false
79 }
80 }
81 return true
82}
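
// A small sketch of how these rule types compose (the header names are
// illustrative only):
//
//	metaOnly := rules{
//	    inclusiveRules{
//	        patterns{"X-Amz-Meta-"},
//	        blacklist{mapRule{"X-Amz-Meta-Internal": struct{}{}}},
//	    },
//	}
//	metaOnly.IsValid("X-Amz-Meta-Owner")    // true: matches the pattern, not blacklisted
//	metaOnly.IsValid("X-Amz-Meta-Internal") // false: excluded by the blacklist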
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go
new file mode 100644
index 0000000..6aa2ed2
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go
@@ -0,0 +1,7 @@
1package v4
2
3// WithUnsignedPayload will enable and set the UnsignedPayload field to
4// true of the signer.
5func WithUnsignedPayload(v4 *Signer) {
6 v4.UnsignedPayload = true
7}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go
new file mode 100644
index 0000000..bd082e9
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go
@@ -0,0 +1,24 @@
1// +build go1.5
2
3package v4
4
5import (
6 "net/url"
7 "strings"
8)
9
10func getURIPath(u *url.URL) string {
11 var uri string
12
13 if len(u.Opaque) > 0 {
14 uri = "/" + strings.Join(strings.Split(u.Opaque, "/")[3:], "/")
15 } else {
16 uri = u.EscapedPath()
17 }
18
19 if len(uri) == 0 {
20 uri = "/"
21 }
22
23 return uri
24}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go
new file mode 100644
index 0000000..434ac87
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go
@@ -0,0 +1,761 @@
1// Package v4 implements signing for AWS V4 signer
2//
3// Provides request signing for requests that need to be signed with
4// AWS V4 Signatures.
5//
6// Standalone Signer
7//
8// Generally using the signer outside of the SDK should not require any additional
9// logic when using Go v1.5 or higher. The signer does this by taking advantage
10// of the URL.EscapedPath method. If your request URI requires additional escaping
11// you may need to use the URL.Opaque to define what the raw URI should be sent
12// to the service as.
13//
14// The signer will first check the URL.Opaque field, and use its value if set.
15// The signer does require the URL.Opaque field to be set in the form of:
16//
17// "//<hostname>/<path>"
18//
19// // e.g.
20// "//example.com/some/path"
21//
22// The leading "//" and hostname are required or the URL.Opaque escaping will
23// not work correctly.
24//
25// If URL.Opaque is not set the signer will fallback to the URL.EscapedPath()
26// method and use the returned value. If you're using Go v1.4 you must set
27// URL.Opaque if the URI path needs escaping. If URL.Opaque is not set with
28// Go v1.5 the signer will fallback to URL.Path.
29//
30// AWS v4 signature validation requires that the canonical string's URI path
31// element must be the URI escaped form of the HTTP request's path.
32// http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
33//
34// The Go HTTP client will perform escaping automatically on the request. Some
35// of this escaping may cause signature validation errors because the HTTP
36// request differs from the URI path or query the signature was generated for.
37// https://golang.org/pkg/net/url/#URL.EscapedPath
38//
39// Because of this, when using the signer outside of the SDK it is recommended
40// to explicitly escape the request prior to signing, which will help prevent
41// signature validation errors. This can be done by setting
42// the URL.Opaque or URL.RawPath. The SDK will use URL.Opaque first and then
43// call URL.EscapedPath() if Opaque is not set.
44//
45// If signing a request intended for HTTP2 server, and you're using Go 1.6.2
46// through 1.7.4 you should use the URL.RawPath as the pre-escaped form of the
47// request URL. https://github.com/golang/go/issues/16847 points to a bug in
48// Go pre 1.8 that fails to make HTTP2 requests using an absolute URL in the HTTP
49// message. URL.Opaque generally will force Go to make requests with absolute URL.
50// URL.RawPath does not do this, but RawPath must be a valid escaping of Path
51// or url.EscapedPath will ignore the RawPath escaping.
52//
53// Test `TestStandaloneSign` provides a complete example of using the signer
54// outside of the SDK and pre-escaping the URI path.
55package v4
56
57import (
58 "bytes"
59 "crypto/hmac"
60 "crypto/sha256"
61 "encoding/hex"
62 "fmt"
63 "io"
64 "io/ioutil"
65 "net/http"
66 "net/url"
67 "sort"
68 "strconv"
69 "strings"
70 "time"
71
72 "github.com/aws/aws-sdk-go/aws"
73 "github.com/aws/aws-sdk-go/aws/credentials"
74 "github.com/aws/aws-sdk-go/aws/request"
75 "github.com/aws/aws-sdk-go/private/protocol/rest"
76)
77
78const (
79 authHeaderPrefix = "AWS4-HMAC-SHA256"
80 timeFormat = "20060102T150405Z"
81 shortTimeFormat = "20060102"
82
83 // emptyStringSHA256 is a SHA256 of an empty string
84 emptyStringSHA256 = `e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855`
85)
86
87var ignoredHeaders = rules{
88 blacklist{
89 mapRule{
90 "Authorization": struct{}{},
91 "User-Agent": struct{}{},
92 "X-Amzn-Trace-Id": struct{}{},
93 },
94 },
95}
96
97// requiredSignedHeaders is a whitelist for building canonical headers.
98var requiredSignedHeaders = rules{
99 whitelist{
100 mapRule{
101 "Cache-Control": struct{}{},
102 "Content-Disposition": struct{}{},
103 "Content-Encoding": struct{}{},
104 "Content-Language": struct{}{},
105 "Content-Md5": struct{}{},
106 "Content-Type": struct{}{},
107 "Expires": struct{}{},
108 "If-Match": struct{}{},
109 "If-Modified-Since": struct{}{},
110 "If-None-Match": struct{}{},
111 "If-Unmodified-Since": struct{}{},
112 "Range": struct{}{},
113 "X-Amz-Acl": struct{}{},
114 "X-Amz-Copy-Source": struct{}{},
115 "X-Amz-Copy-Source-If-Match": struct{}{},
116 "X-Amz-Copy-Source-If-Modified-Since": struct{}{},
117 "X-Amz-Copy-Source-If-None-Match": struct{}{},
118 "X-Amz-Copy-Source-If-Unmodified-Since": struct{}{},
119 "X-Amz-Copy-Source-Range": struct{}{},
120 "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{},
121 "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": struct{}{},
122 "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": struct{}{},
123 "X-Amz-Grant-Full-control": struct{}{},
124 "X-Amz-Grant-Read": struct{}{},
125 "X-Amz-Grant-Read-Acp": struct{}{},
126 "X-Amz-Grant-Write": struct{}{},
127 "X-Amz-Grant-Write-Acp": struct{}{},
128 "X-Amz-Metadata-Directive": struct{}{},
129 "X-Amz-Mfa": struct{}{},
130 "X-Amz-Request-Payer": struct{}{},
131 "X-Amz-Server-Side-Encryption": struct{}{},
132 "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": struct{}{},
133 "X-Amz-Server-Side-Encryption-Customer-Algorithm": struct{}{},
134 "X-Amz-Server-Side-Encryption-Customer-Key": struct{}{},
135 "X-Amz-Server-Side-Encryption-Customer-Key-Md5": struct{}{},
136 "X-Amz-Storage-Class": struct{}{},
137 "X-Amz-Website-Redirect-Location": struct{}{},
138 },
139 },
140 patterns{"X-Amz-Meta-"},
141}
142
143// allowedQueryHoisting is a whitelist of headers allowed to be hoisted
144// into a presigned request's query string.
145var allowedQueryHoisting = inclusiveRules{
146 blacklist{requiredSignedHeaders},
147 patterns{"X-Amz-"},
148}
149
150// Signer applies AWS v4 signing to given request. Use this to sign requests
151// that need to be signed with AWS V4 Signatures.
152type Signer struct {
153 // The authentication credentials the request will be signed against.
154 // This value must be set to sign requests.
155 Credentials *credentials.Credentials
156
157 // Sets the log level the signer should use when reporting information to
158 // the logger. If the logger is nil nothing will be logged. See
159 // aws.LogLevelType for more information on available logging levels
160 //
161 // By default nothing will be logged.
162 Debug aws.LogLevelType
163
164	// The logger logging information will be written to. If the logger
165 // is nil, nothing will be logged.
166 Logger aws.Logger
167
168 // Disables the Signer's moving HTTP header key/value pairs from the HTTP
169 // request header to the request's query string. This is most commonly used
170 // with pre-signed requests preventing headers from being added to the
171 // request's query string.
172 DisableHeaderHoisting bool
173
174 // Disables the automatic escaping of the URI path of the request for the
175	// signature's canonical string's path. For services that do not need additional
176 // escaping then use this to disable the signer escaping the path.
177 //
178 // S3 is an example of a service that does not need additional escaping.
179 //
180 // http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
181 DisableURIPathEscaping bool
182
183	// Disables the automatic setting of the HTTP request's Body field with the
184 // io.ReadSeeker passed in to the signer. This is useful if you're using a
185 // custom wrapper around the body for the io.ReadSeeker and want to preserve
186 // the Body value on the Request.Body.
187 //
188 // This does run the risk of signing a request with a body that will not be
189	// sent in the request. You need to ensure that the underlying data of the Body
190 // values are the same.
191 DisableRequestBodyOverwrite bool
192
193 // currentTimeFn returns the time value which represents the current time.
194 // This value should only be used for testing. If it is nil the default
195 // time.Now will be used.
196 currentTimeFn func() time.Time
197
198 // UnsignedPayload will prevent signing of the payload. This will only
199 // work for services that have support for this.
200 UnsignedPayload bool
201}
202
203// NewSigner returns a Signer pointer configured with the credentials and optional
204// option values provided. If no options are provided the Signer will use its
205// default configuration.
206func NewSigner(credentials *credentials.Credentials, options ...func(*Signer)) *Signer {
207 v4 := &Signer{
208 Credentials: credentials,
209 }
210
211 for _, option := range options {
212 option(v4)
213 }
214
215 return v4
216}
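
// A brief construction sketch (static credentials used for illustration only):
//
//	creds := credentials.NewStaticCredentials("AKID", "SECRET", "")
//	signer := v4.NewSigner(creds, v4.WithUnsignedPayload, func(s *v4.Signer) {
//	    s.DisableHeaderHoisting = true
//	})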
217
218type signingCtx struct {
219 ServiceName string
220 Region string
221 Request *http.Request
222 Body io.ReadSeeker
223 Query url.Values
224 Time time.Time
225 ExpireTime time.Duration
226 SignedHeaderVals http.Header
227
228 DisableURIPathEscaping bool
229
230 credValues credentials.Value
231 isPresign bool
232 formattedTime string
233 formattedShortTime string
234 unsignedPayload bool
235
236 bodyDigest string
237 signedHeaders string
238 canonicalHeaders string
239 canonicalString string
240 credentialString string
241 stringToSign string
242 signature string
243 authorization string
244}
245
246// Sign signs AWS v4 requests with the provided body, service name, region the
247// request is made to, and time the request is signed at. The signTime allows
248// you to specify that a request is signed for the future, and cannot be
249// used until then.
250//
251// Returns a list of HTTP headers that were included in the signature or an
252// error if signing the request failed. Generally for signed requests this value
253// is not needed as the full request context will be captured by the http.Request
254// value. It is included for reference though.
255//
256// Sign will set the request's Body to be the `body` parameter passed in. If
257// the body is not already an io.ReadCloser, it will be wrapped within one. If
258// a `nil` body parameter is passed to Sign, the request's Body field will
259// also be set to nil. It's important to note that this functionality will not
260// change the request's ContentLength.
261//
262// Sign differs from Presign in that it will sign the request using HTTP
263// header values. This type of signing is intended for http.Request values that
264// will not be shared, or are shared in a way the header values on the request
265// will not be lost.
266//
267// The request's body is an io.ReadSeeker so the SHA256 of the body can be
268// generated. To bypass the signer computing the hash you can set the
269// "X-Amz-Content-Sha256" header with a precomputed value. The signer will
270// only compute the hash if the request header value is empty.
271func (v4 Signer) Sign(r *http.Request, body io.ReadSeeker, service, region string, signTime time.Time) (http.Header, error) {
272 return v4.signWithBody(r, body, service, region, 0, signTime)
273}
274
275// Presign signs AWS v4 requests with the provided body, service name, region
276// the request is made to, and time the request is signed at. The signTime
277// allows you to specify that a request is signed for the future, and cannot
278// be used until then.
279//
280// Returns a list of HTTP headers that were included in the signature or an
281// error if signing the request failed. For presigned requests these headers
282// and their values must be included on the HTTP request when it is made. This
283// is helpful to know what header values need to be shared with the party the
284// presigned request will be distributed to.
285//
286// Presign differs from Sign in that it will sign the request using query string
287// instead of header values. This allows you to share the Presigned Request's
288// URL with third parties, or distribute it throughout your system with minimal
289// dependencies.
290//
291// Presign also takes an exp value which is the duration the
292// signed request will be valid after the signing time. This allows you to
293// set when the request will expire.
294//
295// The request's body is an io.ReadSeeker so the SHA256 of the body can be
296// generated. To bypass the signer computing the hash you can set the
297// "X-Amz-Content-Sha256" header with a precomputed value. The signer will
298// only compute the hash if the request header value is empty.
299//
300// Presigning an S3 request will not compute the body's SHA256 hash by default.
301// This is because the general use case for S3 presigned URLs is to share
302// PUT/GET capabilities. If you would like to include the body's SHA256 in the
303// presigned request's signature you can set the "X-Amz-Content-Sha256"
304// HTTP header and that will be included in the request's signature.
305func (v4 Signer) Presign(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, signTime time.Time) (http.Header, error) {
306 return v4.signWithBody(r, body, service, region, exp, signTime)
307}
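
// A sketch of presigning a standalone request with a signer constructed as
// above (the bucket URL is a placeholder):
//
//	req, _ := http.NewRequest("GET", "https://examplebucket.s3.amazonaws.com/key", nil)
//	hdrs, err := signer.Presign(req, nil, "s3", "us-east-1", 15*time.Minute, time.Now())
//	if err == nil {
//	    fmt.Println(req.URL.String(), hdrs) // shareable presigned URL and required headers
//	}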
308
309func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, signTime time.Time) (http.Header, error) {
310 currentTimeFn := v4.currentTimeFn
311 if currentTimeFn == nil {
312 currentTimeFn = time.Now
313 }
314
315 ctx := &signingCtx{
316 Request: r,
317 Body: body,
318 Query: r.URL.Query(),
319 Time: signTime,
320 ExpireTime: exp,
321 isPresign: exp != 0,
322 ServiceName: service,
323 Region: region,
324 DisableURIPathEscaping: v4.DisableURIPathEscaping,
325 unsignedPayload: v4.UnsignedPayload,
326 }
327
328 for key := range ctx.Query {
329 sort.Strings(ctx.Query[key])
330 }
331
332 if ctx.isRequestSigned() {
333 ctx.Time = currentTimeFn()
334 ctx.handlePresignRemoval()
335 }
336
337 var err error
338 ctx.credValues, err = v4.Credentials.Get()
339 if err != nil {
340 return http.Header{}, err
341 }
342
343 ctx.assignAmzQueryValues()
344 ctx.build(v4.DisableHeaderHoisting)
345
346 // If the request is not presigned the body should be attached to it. This
347 // prevents the confusion of wanting to send a signed request without
348 // the body the request was signed for attached.
349 if !(v4.DisableRequestBodyOverwrite || ctx.isPresign) {
350 var reader io.ReadCloser
351 if body != nil {
352 var ok bool
353 if reader, ok = body.(io.ReadCloser); !ok {
354 reader = ioutil.NopCloser(body)
355 }
356 }
357 r.Body = reader
358 }
359
360 if v4.Debug.Matches(aws.LogDebugWithSigning) {
361 v4.logSigningInfo(ctx)
362 }
363
364 return ctx.SignedHeaderVals, nil
365}
366
367func (ctx *signingCtx) handlePresignRemoval() {
368 if !ctx.isPresign {
369 return
370 }
371
372 // The credentials have expired for this request. The current signing
373	// is invalid, and the request needs to be signed again or it will fail.
374 ctx.removePresign()
375
376	// Update the request's query string to ensure the values stay in
377 // sync in the case retrieving the new credentials fails.
378 ctx.Request.URL.RawQuery = ctx.Query.Encode()
379}
380
381func (ctx *signingCtx) assignAmzQueryValues() {
382 if ctx.isPresign {
383 ctx.Query.Set("X-Amz-Algorithm", authHeaderPrefix)
384 if ctx.credValues.SessionToken != "" {
385 ctx.Query.Set("X-Amz-Security-Token", ctx.credValues.SessionToken)
386 } else {
387 ctx.Query.Del("X-Amz-Security-Token")
388 }
389
390 return
391 }
392
393 if ctx.credValues.SessionToken != "" {
394 ctx.Request.Header.Set("X-Amz-Security-Token", ctx.credValues.SessionToken)
395 }
396}
397
398// SignRequestHandler is a named request handler the SDK will use to sign
399// service client requests with the V4 signature.
400var SignRequestHandler = request.NamedHandler{
401 Name: "v4.SignRequestHandler", Fn: SignSDKRequest,
402}
403
404// SignSDKRequest signs an AWS request with the V4 signature. This
405// request handler is best used only with the SDK's built-in service clients'
406// API operation requests.
407//
408// This function should not be used on its own, but in conjunction with
409// an AWS service client's API operation call. To sign a standalone request
410// not created by a service client's API operation method use the "Sign" or
411// "Presign" functions of the "Signer" type.
412//
413// If the credentials of the request's config are set to
414// credentials.AnonymousCredentials the request will not be signed.
415func SignSDKRequest(req *request.Request) {
416 signSDKRequestWithCurrTime(req, time.Now)
417}
418
419// BuildNamedHandler will build a generic handler for signing.
420func BuildNamedHandler(name string, opts ...func(*Signer)) request.NamedHandler {
421 return request.NamedHandler{
422 Name: name,
423 Fn: func(req *request.Request) {
424 signSDKRequestWithCurrTime(req, time.Now, opts...)
425 },
426 }
427}
428
429func signSDKRequestWithCurrTime(req *request.Request, curTimeFn func() time.Time, opts ...func(*Signer)) {
430	// Skip signing the request if the AnonymousCredentials object is used,
431	// since such a request does not need to be signed.
432 if req.Config.Credentials == credentials.AnonymousCredentials {
433 return
434 }
435
436 region := req.ClientInfo.SigningRegion
437 if region == "" {
438 region = aws.StringValue(req.Config.Region)
439 }
440
441 name := req.ClientInfo.SigningName
442 if name == "" {
443 name = req.ClientInfo.ServiceName
444 }
445
446 v4 := NewSigner(req.Config.Credentials, func(v4 *Signer) {
447 v4.Debug = req.Config.LogLevel.Value()
448 v4.Logger = req.Config.Logger
449 v4.DisableHeaderHoisting = req.NotHoist
450 v4.currentTimeFn = curTimeFn
451 if name == "s3" {
452 // S3 service should not have any escaping applied
453 v4.DisableURIPathEscaping = true
454 }
455 // Prevents setting the HTTPRequest's Body. Since the Body could be
456		// wrapped in a custom io.Closer that we do not want to be stomped
457 // on top of by the signer.
458 v4.DisableRequestBodyOverwrite = true
459 })
460
461 for _, opt := range opts {
462 opt(v4)
463 }
464
465 signingTime := req.Time
466 if !req.LastSignedAt.IsZero() {
467 signingTime = req.LastSignedAt
468 }
469
470 signedHeaders, err := v4.signWithBody(req.HTTPRequest, req.GetBody(),
471 name, region, req.ExpireTime, signingTime,
472 )
473 if err != nil {
474 req.Error = err
475 req.SignedHeaderVals = nil
476 return
477 }
478
479 req.SignedHeaderVals = signedHeaders
480 req.LastSignedAt = curTimeFn()
481}
482
483const logSignInfoMsg = `DEBUG: Request Signature:
484---[ CANONICAL STRING ]-----------------------------
485%s
486---[ STRING TO SIGN ]--------------------------------
487%s%s
488-----------------------------------------------------`
489const logSignedURLMsg = `
490---[ SIGNED URL ]------------------------------------
491%s`
492
493func (v4 *Signer) logSigningInfo(ctx *signingCtx) {
494 signedURLMsg := ""
495 if ctx.isPresign {
496 signedURLMsg = fmt.Sprintf(logSignedURLMsg, ctx.Request.URL.String())
497 }
498 msg := fmt.Sprintf(logSignInfoMsg, ctx.canonicalString, ctx.stringToSign, signedURLMsg)
499 v4.Logger.Log(msg)
500}
501
502func (ctx *signingCtx) build(disableHeaderHoisting bool) {
503 ctx.buildTime() // no depends
504 ctx.buildCredentialString() // no depends
505
506 unsignedHeaders := ctx.Request.Header
507 if ctx.isPresign {
508 if !disableHeaderHoisting {
509 urlValues := url.Values{}
510 urlValues, unsignedHeaders = buildQuery(allowedQueryHoisting, unsignedHeaders) // no depends
511 for k := range urlValues {
512 ctx.Query[k] = urlValues[k]
513 }
514 }
515 }
516
517 ctx.buildBodyDigest()
518 ctx.buildCanonicalHeaders(ignoredHeaders, unsignedHeaders)
519 ctx.buildCanonicalString() // depends on canon headers / signed headers
520 ctx.buildStringToSign() // depends on canon string
521 ctx.buildSignature() // depends on string to sign
522
523 if ctx.isPresign {
524 ctx.Request.URL.RawQuery += "&X-Amz-Signature=" + ctx.signature
525 } else {
526 parts := []string{
527 authHeaderPrefix + " Credential=" + ctx.credValues.AccessKeyID + "/" + ctx.credentialString,
528 "SignedHeaders=" + ctx.signedHeaders,
529 "Signature=" + ctx.signature,
530 }
531 ctx.Request.Header.Set("Authorization", strings.Join(parts, ", "))
532 }
533}
534
535func (ctx *signingCtx) buildTime() {
536 ctx.formattedTime = ctx.Time.UTC().Format(timeFormat)
537 ctx.formattedShortTime = ctx.Time.UTC().Format(shortTimeFormat)
538
539 if ctx.isPresign {
540 duration := int64(ctx.ExpireTime / time.Second)
541 ctx.Query.Set("X-Amz-Date", ctx.formattedTime)
542 ctx.Query.Set("X-Amz-Expires", strconv.FormatInt(duration, 10))
543 } else {
544 ctx.Request.Header.Set("X-Amz-Date", ctx.formattedTime)
545 }
546}
547
548func (ctx *signingCtx) buildCredentialString() {
549 ctx.credentialString = strings.Join([]string{
550 ctx.formattedShortTime,
551 ctx.Region,
552 ctx.ServiceName,
553 "aws4_request",
554 }, "/")
555
556 if ctx.isPresign {
557 ctx.Query.Set("X-Amz-Credential", ctx.credValues.AccessKeyID+"/"+ctx.credentialString)
558 }
559}
560
561func buildQuery(r rule, header http.Header) (url.Values, http.Header) {
562 query := url.Values{}
563 unsignedHeaders := http.Header{}
564 for k, h := range header {
565 if r.IsValid(k) {
566 query[k] = h
567 } else {
568 unsignedHeaders[k] = h
569 }
570 }
571
572 return query, unsignedHeaders
573}
574func (ctx *signingCtx) buildCanonicalHeaders(r rule, header http.Header) {
575 var headers []string
576 headers = append(headers, "host")
577 for k, v := range header {
578 canonicalKey := http.CanonicalHeaderKey(k)
579 if !r.IsValid(canonicalKey) {
580 continue // ignored header
581 }
582 if ctx.SignedHeaderVals == nil {
583 ctx.SignedHeaderVals = make(http.Header)
584 }
585
586 lowerCaseKey := strings.ToLower(k)
587 if _, ok := ctx.SignedHeaderVals[lowerCaseKey]; ok {
588 // include additional values
589 ctx.SignedHeaderVals[lowerCaseKey] = append(ctx.SignedHeaderVals[lowerCaseKey], v...)
590 continue
591 }
592
593 headers = append(headers, lowerCaseKey)
594 ctx.SignedHeaderVals[lowerCaseKey] = v
595 }
596 sort.Strings(headers)
597
598 ctx.signedHeaders = strings.Join(headers, ";")
599
600 if ctx.isPresign {
601 ctx.Query.Set("X-Amz-SignedHeaders", ctx.signedHeaders)
602 }
603
604 headerValues := make([]string, len(headers))
605 for i, k := range headers {
606 if k == "host" {
607 headerValues[i] = "host:" + ctx.Request.URL.Host
608 } else {
609 headerValues[i] = k + ":" +
610 strings.Join(ctx.SignedHeaderVals[k], ",")
611 }
612 }
613
614 ctx.canonicalHeaders = strings.Join(stripExcessSpaces(headerValues), "\n")
615}
616
617func (ctx *signingCtx) buildCanonicalString() {
618 ctx.Request.URL.RawQuery = strings.Replace(ctx.Query.Encode(), "+", "%20", -1)
619
620 uri := getURIPath(ctx.Request.URL)
621
622 if !ctx.DisableURIPathEscaping {
623 uri = rest.EscapePath(uri, false)
624 }
625
626 ctx.canonicalString = strings.Join([]string{
627 ctx.Request.Method,
628 uri,
629 ctx.Request.URL.RawQuery,
630 ctx.canonicalHeaders + "\n",
631 ctx.signedHeaders,
632 ctx.bodyDigest,
633 }, "\n")
634}
635
636func (ctx *signingCtx) buildStringToSign() {
637 ctx.stringToSign = strings.Join([]string{
638 authHeaderPrefix,
639 ctx.formattedTime,
640 ctx.credentialString,
641 hex.EncodeToString(makeSha256([]byte(ctx.canonicalString))),
642 }, "\n")
643}
644
645func (ctx *signingCtx) buildSignature() {
646 secret := ctx.credValues.SecretAccessKey
647 date := makeHmac([]byte("AWS4"+secret), []byte(ctx.formattedShortTime))
648 region := makeHmac(date, []byte(ctx.Region))
649 service := makeHmac(region, []byte(ctx.ServiceName))
650 credentials := makeHmac(service, []byte("aws4_request"))
651 signature := makeHmac(credentials, []byte(ctx.stringToSign))
652 ctx.signature = hex.EncodeToString(signature)
653}
654
655func (ctx *signingCtx) buildBodyDigest() {
656 hash := ctx.Request.Header.Get("X-Amz-Content-Sha256")
657 if hash == "" {
658 if ctx.unsignedPayload || (ctx.isPresign && ctx.ServiceName == "s3") {
659 hash = "UNSIGNED-PAYLOAD"
660 } else if ctx.Body == nil {
661 hash = emptyStringSHA256
662 } else {
663 hash = hex.EncodeToString(makeSha256Reader(ctx.Body))
664 }
665 if ctx.unsignedPayload || ctx.ServiceName == "s3" || ctx.ServiceName == "glacier" {
666 ctx.Request.Header.Set("X-Amz-Content-Sha256", hash)
667 }
668 }
669 ctx.bodyDigest = hash
670}
671
672// isRequestSigned returns if the request is currently signed or presigned
673func (ctx *signingCtx) isRequestSigned() bool {
674 if ctx.isPresign && ctx.Query.Get("X-Amz-Signature") != "" {
675 return true
676 }
677 if ctx.Request.Header.Get("Authorization") != "" {
678 return true
679 }
680
681 return false
682}
683
684// removePresign removes the presigning query parameters from the request.
685func (ctx *signingCtx) removePresign() {
686 ctx.Query.Del("X-Amz-Algorithm")
687 ctx.Query.Del("X-Amz-Signature")
688 ctx.Query.Del("X-Amz-Security-Token")
689 ctx.Query.Del("X-Amz-Date")
690 ctx.Query.Del("X-Amz-Expires")
691 ctx.Query.Del("X-Amz-Credential")
692 ctx.Query.Del("X-Amz-SignedHeaders")
693}
694
695func makeHmac(key []byte, data []byte) []byte {
696 hash := hmac.New(sha256.New, key)
697 hash.Write(data)
698 return hash.Sum(nil)
699}
700
701func makeSha256(data []byte) []byte {
702 hash := sha256.New()
703 hash.Write(data)
704 return hash.Sum(nil)
705}
706
707func makeSha256Reader(reader io.ReadSeeker) []byte {
708 hash := sha256.New()
709 start, _ := reader.Seek(0, 1)
710 defer reader.Seek(start, 0)
711
712 io.Copy(hash, reader)
713 return hash.Sum(nil)
714}
715
716const doubleSpaces = "  "
717
718var doubleSpaceBytes = []byte(doubleSpaces)
719
720func stripExcessSpaces(headerVals []string) []string {
721 vals := make([]string, len(headerVals))
722 for i, str := range headerVals {
723 // Trim leading and trailing spaces
724 trimmed := strings.TrimSpace(str)
725
726 idx := strings.Index(trimmed, doubleSpaces)
727 var buf []byte
728 for idx > -1 {
729 // Multiple adjacent spaces found
730 if buf == nil {
731 // first time create the buffer
732 buf = []byte(trimmed)
733 }
734
735 stripToIdx := -1
736 for j := idx + 1; j < len(buf); j++ {
737 if buf[j] != ' ' {
738 buf = append(buf[:idx+1], buf[j:]...)
739 stripToIdx = j
740 break
741 }
742 }
743
744 if stripToIdx >= 0 {
745 idx = bytes.Index(buf[stripToIdx:], doubleSpaceBytes)
746 if idx >= 0 {
747 idx += stripToIdx
748 }
749 } else {
750 idx = -1
751 }
752 }
753
754 if buf != nil {
755 vals[i] = string(buf)
756 } else {
757 vals[i] = trimmed
758 }
759 }
760 return vals
761}
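
// stripExcessSpaces collapses runs of spaces so canonical headers hash
// consistently. For example (illustrative):
//
//	stripExcessSpaces([]string{"  a   b  c "}) // -> []string{"a b c"}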
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/types.go b/vendor/github.com/aws/aws-sdk-go/aws/types.go
new file mode 100644
index 0000000..0e2d864
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/types.go
@@ -0,0 +1,118 @@
1package aws
2
3import (
4 "io"
5 "sync"
6)
7
8// ReadSeekCloser wraps an io.Reader returning a ReaderSeekerCloser. Should
9// only be used with an io.Reader that is also an io.Seeker. Not doing so may
10// cause request signature errors, or the request body not being sent for GET,
11// HEAD and DELETE HTTP methods.
12//
13// Deprecated: Should only be used with io.ReadSeeker. If using for
14// S3 PutObject to stream content use s3manager.Uploader instead.
15func ReadSeekCloser(r io.Reader) ReaderSeekerCloser {
16 return ReaderSeekerCloser{r}
17}
18
19// ReaderSeekerCloser represents a reader that can also delegate io.Seeker and
20// io.Closer interfaces to the underlying object if they are available.
21type ReaderSeekerCloser struct {
22 r io.Reader
23}
24
25// Read reads from the reader up to the size of p. The number of bytes read,
26// and error if one occurred, will be returned.
27//
28// If the underlying reader is not an io.Reader, zero bytes read and a nil error will be returned.
29//
30// Performs the same functionality as io.Reader's Read.
31func (r ReaderSeekerCloser) Read(p []byte) (int, error) {
32 switch t := r.r.(type) {
33 case io.Reader:
34 return t.Read(p)
35 }
36 return 0, nil
37}
38
39// Seek sets the offset for the next Read to offset, interpreted according to
40// whence: 0 means relative to the origin of the file, 1 means relative to the
41// current offset, and 2 means relative to the end. Seek returns the new offset
42// and an error, if any.
43//
44// If the ReaderSeekerCloser is not an io.Seeker nothing will be done.
45func (r ReaderSeekerCloser) Seek(offset int64, whence int) (int64, error) {
46 switch t := r.r.(type) {
47 case io.Seeker:
48 return t.Seek(offset, whence)
49 }
50 return int64(0), nil
51}
52
53// IsSeeker returns if the underlying reader is also a seeker.
54func (r ReaderSeekerCloser) IsSeeker() bool {
55 _, ok := r.r.(io.Seeker)
56 return ok
57}
58
59// Close closes the ReaderSeekerCloser.
60//
61// If the ReaderSeekerCloser is not an io.Closer nothing will be done.
62func (r ReaderSeekerCloser) Close() error {
63 switch t := r.r.(type) {
64 case io.Closer:
65 return t.Close()
66 }
67 return nil
68}
69
70// A WriteAtBuffer provides an in-memory buffer supporting the io.WriterAt interface.
71// Can be used with the s3manager.Downloader to download content to a buffer
72// in memory. Safe to use concurrently.
73type WriteAtBuffer struct {
74 buf []byte
75 m sync.Mutex
76
77 // GrowthCoeff defines the growth rate of the internal buffer. By
78 // default, the growth rate is 1, where expanding the internal
79 // buffer will allocate only enough capacity to fit the new expected
80 // length.
81 GrowthCoeff float64
82}
83
84// NewWriteAtBuffer creates a WriteAtBuffer with an internal buffer
85// provided by buf.
86func NewWriteAtBuffer(buf []byte) *WriteAtBuffer {
87 return &WriteAtBuffer{buf: buf}
88}
89
90// WriteAt writes a slice of bytes to the buffer starting at the position provided.
91// The number of bytes written, or an error, will be returned. Can overwrite
92// previously written slices if the writes overlap.
93func (b *WriteAtBuffer) WriteAt(p []byte, pos int64) (n int, err error) {
94 pLen := len(p)
95 expLen := pos + int64(pLen)
96 b.m.Lock()
97 defer b.m.Unlock()
98 if int64(len(b.buf)) < expLen {
99 if int64(cap(b.buf)) < expLen {
100 if b.GrowthCoeff < 1 {
101 b.GrowthCoeff = 1
102 }
103 newBuf := make([]byte, expLen, int64(b.GrowthCoeff*float64(expLen)))
104 copy(newBuf, b.buf)
105 b.buf = newBuf
106 }
107 b.buf = b.buf[:expLen]
108 }
109 copy(b.buf[pos:], p)
110 return pLen, nil
111}
112
113// Bytes returns a slice of bytes written to the buffer.
114func (b *WriteAtBuffer) Bytes() []byte {
115 b.m.Lock()
116 defer b.m.Unlock()
117 return b.buf
118}
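
// A short usage sketch from a consumer's perspective (values are illustrative):
//
//	buf := aws.NewWriteAtBuffer([]byte{})
//	buf.WriteAt([]byte("world"), 6)
//	buf.WriteAt([]byte("hello,"), 0)
//	fmt.Printf("%q\n", buf.Bytes()) // "hello,world"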
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/url.go b/vendor/github.com/aws/aws-sdk-go/aws/url.go
new file mode 100644
index 0000000..6192b24
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/url.go
@@ -0,0 +1,12 @@
1// +build go1.8
2
3package aws
4
5import "net/url"
6
7// URLHostname will extract the Hostname without port from the URL value.
8//
9// Wrapper of net/url#URL.Hostname for backwards Go version compatibility.
10func URLHostname(url *url.URL) string {
11 return url.Hostname()
12}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go b/vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go
new file mode 100644
index 0000000..0210d27
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go
@@ -0,0 +1,29 @@
1// +build !go1.8
2
3package aws
4
5import (
6 "net/url"
7 "strings"
8)
9
10// URLHostname will extract the Hostname without port from the URL value.
11//
12// Copy of Go 1.8's net/url#URL.Hostname functionality.
13func URLHostname(url *url.URL) string {
14 return stripPort(url.Host)
15
16}
17
18// stripPort is a copy of the Go 1.8 url#URL.Hostname functionality.
19// https://golang.org/src/net/url/url.go
20func stripPort(hostport string) string {
21 colon := strings.IndexByte(hostport, ':')
22 if colon == -1 {
23 return hostport
24 }
25 if i := strings.IndexByte(hostport, ']'); i != -1 {
26 return strings.TrimPrefix(hostport[:i], "[")
27 }
28 return hostport[:colon]
29}
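
// For example (illustrative):
//
//	u, _ := url.Parse("https://[::1]:8443/path")
//	URLHostname(u) // "::1" (brackets and port stripped)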
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go
new file mode 100644
index 0000000..129dad9
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go
@@ -0,0 +1,8 @@
1// Package aws provides core functionality for making requests to AWS services.
2package aws
3
4// SDKName is the name of this AWS SDK
5const SDKName = "aws-sdk-go"
6
7// SDKVersion is the version of this SDK
8const SDKVersion = "1.8.21"
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go
new file mode 100644
index 0000000..53831df
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go
@@ -0,0 +1,75 @@
1package protocol
2
3import (
4 "crypto/rand"
5 "fmt"
6 "reflect"
7)
8
9// RandReader is the random reader the protocol package will use to read
10// random bytes from. This is exported for testing, and should not be used.
11var RandReader = rand.Reader
12
13const idempotencyTokenFillTag = `idempotencyToken`
14
15// CanSetIdempotencyToken returns true if the struct field should be
16// automatically populated with an Idempotency token.
17//
18// Only *string and string type fields that are tagged with idempotencyToken
19// which are not already set can be auto filled.
20func CanSetIdempotencyToken(v reflect.Value, f reflect.StructField) bool {
21 switch u := v.Interface().(type) {
22 // To auto fill an Idempotency token the field must be a string,
23 // tagged for auto fill, and have a zero value.
24 case *string:
25 return u == nil && len(f.Tag.Get(idempotencyTokenFillTag)) != 0
26 case string:
27 return len(u) == 0 && len(f.Tag.Get(idempotencyTokenFillTag)) != 0
28 }
29
30 return false
31}
32
33// GetIdempotencyToken returns a randomly generated idempotency token.
34func GetIdempotencyToken() string {
35 b := make([]byte, 16)
36 RandReader.Read(b)
37
38 return UUIDVersion4(b)
39}
40
41// SetIdempotencyToken will set the value provided with an Idempotency Token,
42// given that the value can be set. Will panic if the value is not settable.
43func SetIdempotencyToken(v reflect.Value) {
44 if v.Kind() == reflect.Ptr {
45 if v.IsNil() && v.CanSet() {
46 v.Set(reflect.New(v.Type().Elem()))
47 }
48 v = v.Elem()
49 }
50 v = reflect.Indirect(v)
51
52 if !v.CanSet() {
53		panic(fmt.Sprintf("unable to set idempotency token %v", v))
54 }
55
56 b := make([]byte, 16)
57 _, err := rand.Read(b)
58 if err != nil {
59 // TODO handle error
60 return
61 }
62
63 v.Set(reflect.ValueOf(UUIDVersion4(b)))
64}
65
66// UUIDVersion4 returns a Version 4 random UUID from the byte slice provided
67func UUIDVersion4(u []byte) string {
68 // https://en.wikipedia.org/wiki/Universally_unique_identifier#Version_4_.28random.29
69 // 13th character is "4"
70 u[6] = (u[6] | 0x40) & 0x4F
71 // 17th character is "8", "9", "a", or "b"
72 u[8] = (u[8] | 0x80) & 0xBF
73
74 return fmt.Sprintf(`%X-%X-%X-%X-%X`, u[0:4], u[4:6], u[6:8], u[8:10], u[10:])
75}
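
// A short usage sketch:
//
//	token := protocol.GetIdempotencyToken()
//	fmt.Println(token) // e.g. "3F2504E0-4F89-41D3-9A0C-0305E82C3301"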
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go
new file mode 100644
index 0000000..18169f0
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go
@@ -0,0 +1,36 @@
1// Package query provides serialization of AWS Query requests and responses.
2package query
3
4//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/query.json build_test.go
5
6import (
7 "net/url"
8
9 "github.com/aws/aws-sdk-go/aws/awserr"
10 "github.com/aws/aws-sdk-go/aws/request"
11 "github.com/aws/aws-sdk-go/private/protocol/query/queryutil"
12)
13
14// BuildHandler is a named request handler for building query protocol requests
15var BuildHandler = request.NamedHandler{Name: "awssdk.query.Build", Fn: Build}
16
17// Build builds a request for an AWS Query service.
18func Build(r *request.Request) {
19 body := url.Values{
20 "Action": {r.Operation.Name},
21 "Version": {r.ClientInfo.APIVersion},
22 }
23 if err := queryutil.Parse(body, r.Params, false); err != nil {
24 r.Error = awserr.New("SerializationError", "failed encoding Query request", err)
25 return
26 }
27
28 if r.ExpireTime == 0 {
29 r.HTTPRequest.Method = "POST"
30 r.HTTPRequest.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8")
31 r.SetBufferBody([]byte(body.Encode()))
32 } else { // This is a pre-signed request
33 r.HTTPRequest.Method = "GET"
34 r.HTTPRequest.URL.RawQuery = body.Encode()
35 }
36}
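Build encodes the operation name and API version alongside the serialized parameters. A sketch of the form body this produces, with a hypothetical operation name and version:

package main

import (
	"fmt"
	"net/url"
)

func main() {
	// What query.Build assembles before adding the operation's parameters;
	// "DescribeInstances" and "2016-11-15" are placeholder values.
	body := url.Values{
		"Action":  {"DescribeInstances"},
		"Version": {"2016-11-15"},
	}
	// Sent as the POST body with Content-Type
	// application/x-www-form-urlencoded; charset=utf-8.
	fmt.Println(body.Encode()) // Action=DescribeInstances&Version=2016-11-15
}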
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go
new file mode 100644
index 0000000..524ca95
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go
@@ -0,0 +1,237 @@
1package queryutil
2
3import (
4 "encoding/base64"
5 "fmt"
6 "net/url"
7 "reflect"
8 "sort"
9 "strconv"
10 "strings"
11 "time"
12
13 "github.com/aws/aws-sdk-go/private/protocol"
14)
15
16// Parse parses an object i and fills a url.Values object. The isEC2 flag
17// indicates if this is the EC2 Query sub-protocol.
18func Parse(body url.Values, i interface{}, isEC2 bool) error {
19 q := queryParser{isEC2: isEC2}
20 return q.parseValue(body, reflect.ValueOf(i), "", "")
21}
22
23func elemOf(value reflect.Value) reflect.Value {
24 for value.Kind() == reflect.Ptr {
25 value = value.Elem()
26 }
27 return value
28}
29
30type queryParser struct {
31 isEC2 bool
32}
33
34func (q *queryParser) parseValue(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error {
35 value = elemOf(value)
36
37 // no need to handle zero values
38 if !value.IsValid() {
39 return nil
40 }
41
42 t := tag.Get("type")
43 if t == "" {
44 switch value.Kind() {
45 case reflect.Struct:
46 t = "structure"
47 case reflect.Slice:
48 t = "list"
49 case reflect.Map:
50 t = "map"
51 }
52 }
53
54 switch t {
55 case "structure":
56 return q.parseStruct(v, value, prefix)
57 case "list":
58 return q.parseList(v, value, prefix, tag)
59 case "map":
60 return q.parseMap(v, value, prefix, tag)
61 default:
62 return q.parseScalar(v, value, prefix, tag)
63 }
64}
65
66func (q *queryParser) parseStruct(v url.Values, value reflect.Value, prefix string) error {
67 if !value.IsValid() {
68 return nil
69 }
70
71 t := value.Type()
72 for i := 0; i < value.NumField(); i++ {
73 elemValue := elemOf(value.Field(i))
74 field := t.Field(i)
75
76 if field.PkgPath != "" {
77 continue // ignore unexported fields
78 }
79 if field.Tag.Get("ignore") != "" {
80 continue
81 }
82
83 if protocol.CanSetIdempotencyToken(value.Field(i), field) {
84 token := protocol.GetIdempotencyToken()
85 elemValue = reflect.ValueOf(token)
86 }
87
88 var name string
89 if q.isEC2 {
90 name = field.Tag.Get("queryName")
91 }
92 if name == "" {
93 if field.Tag.Get("flattened") != "" && field.Tag.Get("locationNameList") != "" {
94 name = field.Tag.Get("locationNameList")
95 } else if locName := field.Tag.Get("locationName"); locName != "" {
96 name = locName
97 }
98 if name != "" && q.isEC2 {
99 name = strings.ToUpper(name[0:1]) + name[1:]
100 }
101 }
102 if name == "" {
103 name = field.Name
104 }
105
106 if prefix != "" {
107 name = prefix + "." + name
108 }
109
110 if err := q.parseValue(v, elemValue, name, field.Tag); err != nil {
111 return err
112 }
113 }
114 return nil
115}
116
117func (q *queryParser) parseList(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error {
118 // If it's empty, generate an empty value
119 if !value.IsNil() && value.Len() == 0 {
120 v.Set(prefix, "")
121 return nil
122 }
123
124 // check for unflattened list member
125 if !q.isEC2 && tag.Get("flattened") == "" {
126 if listName := tag.Get("locationNameList"); listName == "" {
127 prefix += ".member"
128 } else {
129 prefix += "." + listName
130 }
131 }
132
133 for i := 0; i < value.Len(); i++ {
134 slicePrefix := prefix
135 if slicePrefix == "" {
136 slicePrefix = strconv.Itoa(i + 1)
137 } else {
138 slicePrefix = slicePrefix + "." + strconv.Itoa(i+1)
139 }
140 if err := q.parseValue(v, value.Index(i), slicePrefix, ""); err != nil {
141 return err
142 }
143 }
144 return nil
145}
146
147func (q *queryParser) parseMap(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error {
148 // If it's empty, generate an empty value
149 if !value.IsNil() && value.Len() == 0 {
150 v.Set(prefix, "")
151 return nil
152 }
153
154 // check for unflattened map entry
155 if !q.isEC2 && tag.Get("flattened") == "" {
156 prefix += ".entry"
157 }
158
159 // sort keys for improved serialization consistency.
160 // this is not strictly necessary for protocol support.
161 mapKeyValues := value.MapKeys()
162 mapKeys := map[string]reflect.Value{}
163 mapKeyNames := make([]string, len(mapKeyValues))
164 for i, mapKey := range mapKeyValues {
165 name := mapKey.String()
166 mapKeys[name] = mapKey
167 mapKeyNames[i] = name
168 }
169 sort.Strings(mapKeyNames)
170
171 for i, mapKeyName := range mapKeyNames {
172 mapKey := mapKeys[mapKeyName]
173 mapValue := value.MapIndex(mapKey)
174
175 kname := tag.Get("locationNameKey")
176 if kname == "" {
177 kname = "key"
178 }
179 vname := tag.Get("locationNameValue")
180 if vname == "" {
181 vname = "value"
182 }
183
184 // serialize key
185 var keyName string
186 if prefix == "" {
187 keyName = strconv.Itoa(i+1) + "." + kname
188 } else {
189 keyName = prefix + "." + strconv.Itoa(i+1) + "." + kname
190 }
191
192 if err := q.parseValue(v, mapKey, keyName, ""); err != nil {
193 return err
194 }
195
196 // serialize value
197 var valueName string
198 if prefix == "" {
199 valueName = strconv.Itoa(i+1) + "." + vname
200 } else {
201 valueName = prefix + "." + strconv.Itoa(i+1) + "." + vname
202 }
203
204 if err := q.parseValue(v, mapValue, valueName, ""); err != nil {
205 return err
206 }
207 }
208
209 return nil
210}
211
212func (q *queryParser) parseScalar(v url.Values, r reflect.Value, name string, tag reflect.StructTag) error {
213 switch value := r.Interface().(type) {
214 case string:
215 v.Set(name, value)
216 case []byte:
217 if !r.IsNil() {
218 v.Set(name, base64.StdEncoding.EncodeToString(value))
219 }
220 case bool:
221 v.Set(name, strconv.FormatBool(value))
222 case int64:
223 v.Set(name, strconv.FormatInt(value, 10))
224 case int:
225 v.Set(name, strconv.Itoa(value))
226 case float64:
227 v.Set(name, strconv.FormatFloat(value, 'f', -1, 64))
228 case float32:
229 v.Set(name, strconv.FormatFloat(float64(value), 'f', -1, 32))
230 case time.Time:
231 const ISO8601UTC = "2006-01-02T15:04:05Z"
232 v.Set(name, value.UTC().Format(ISO8601UTC))
233 default:
234 return fmt.Errorf("unsupported value for param %s: %v (%s)", name, r.Interface(), r.Type().Name())
235 }
236 return nil
237}
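The prefix construction above gives unflattened list members a ".member.N" suffix and map entries an ".entry.N" suffix. A minimal sketch of Parse on a hypothetical shape tagged the way the SDK's generated types are:

package main

import (
	"fmt"
	"net/url"

	"github.com/aws/aws-sdk-go/private/protocol/query/queryutil"
)

// listTagsInput is a hypothetical input shape, not a real SDK type.
type listTagsInput struct {
	_    struct{}  `type:"structure"`
	Keys []*string `locationName:"Keys" type:"list"`
}

func main() {
	k1, k2 := "env", "team"
	body := url.Values{}
	if err := queryutil.Parse(body, &listTagsInput{Keys: []*string{&k1, &k2}}, false); err != nil {
		panic(err)
	}
	// Unflattened list members gain the ".member.N" suffix:
	fmt.Println(body.Encode()) // Keys.member.1=env&Keys.member.2=team
}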
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go
new file mode 100644
index 0000000..e0f4d5a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go
@@ -0,0 +1,35 @@
1package query
2
3//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/query.json unmarshal_test.go
4
5import (
6 "encoding/xml"
7
8 "github.com/aws/aws-sdk-go/aws/awserr"
9 "github.com/aws/aws-sdk-go/aws/request"
10 "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
11)
12
13// UnmarshalHandler is a named request handler for unmarshaling query protocol requests
14var UnmarshalHandler = request.NamedHandler{Name: "awssdk.query.Unmarshal", Fn: Unmarshal}
15
16// UnmarshalMetaHandler is a named request handler for unmarshaling query protocol request metadata
17var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalMeta", Fn: UnmarshalMeta}
18
19// Unmarshal unmarshals a response for an AWS Query service.
20func Unmarshal(r *request.Request) {
21 defer r.HTTPResponse.Body.Close()
22 if r.DataFilled() {
23 decoder := xml.NewDecoder(r.HTTPResponse.Body)
24 err := xmlutil.UnmarshalXML(r.Data, decoder, r.Operation.Name+"Result")
25 if err != nil {
26 r.Error = awserr.New("SerializationError", "failed decoding Query response", err)
27 return
28 }
29 }
30}
31
32// UnmarshalMeta unmarshals header response values for an AWS Query service.
33func UnmarshalMeta(r *request.Request) {
34 r.RequestID = r.HTTPResponse.Header.Get("X-Amzn-Requestid")
35}
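Query responses wrap the operation's output in an "<OperationName>Result" element, which is why Unmarshal passes r.Operation.Name+"Result" as the wrapper. A sketch against xmlutil directly, using a hypothetical GetUser operation and output shape:

package main

import (
	"encoding/xml"
	"fmt"
	"strings"

	"github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
)

// getUserOutput is a hypothetical output shape, not a real SDK type.
type getUserOutput struct {
	_    struct{} `type:"structure"`
	Name *string  `locationName:"Name" type:"string"`
}

func main() {
	body := `<GetUserResponse><GetUserResult><Name>alice</Name></GetUserResult></GetUserResponse>`
	out := &getUserOutput{}
	d := xml.NewDecoder(strings.NewReader(body))
	if err := xmlutil.UnmarshalXML(out, d, "GetUserResult"); err != nil {
		panic(err)
	}
	fmt.Println(*out.Name) // alice
}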
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go
new file mode 100644
index 0000000..f214296
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go
@@ -0,0 +1,66 @@
1package query
2
3import (
4 "encoding/xml"
5 "io/ioutil"
6
7 "github.com/aws/aws-sdk-go/aws/awserr"
8 "github.com/aws/aws-sdk-go/aws/request"
9)
10
11type xmlErrorResponse struct {
12 XMLName xml.Name `xml:"ErrorResponse"`
13 Code string `xml:"Error>Code"`
14 Message string `xml:"Error>Message"`
15 RequestID string `xml:"RequestId"`
16}
17
18type xmlServiceUnavailableResponse struct {
19 XMLName xml.Name `xml:"ServiceUnavailableException"`
20}
21
22// UnmarshalErrorHandler is a named request handler to unmarshal request errors
23var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalError", Fn: UnmarshalError}
24
25// UnmarshalError unmarshals an error response for an AWS Query service.
26func UnmarshalError(r *request.Request) {
27 defer r.HTTPResponse.Body.Close()
28
29 bodyBytes, err := ioutil.ReadAll(r.HTTPResponse.Body)
30 if err != nil {
31 r.Error = awserr.New("SerializationError", "failed to read from query HTTP response body", err)
32 return
33 }
34
35 // First check for specific error
36 resp := xmlErrorResponse{}
37 decodeErr := xml.Unmarshal(bodyBytes, &resp)
38 if decodeErr == nil {
39 reqID := resp.RequestID
40 if reqID == "" {
41 reqID = r.RequestID
42 }
43 r.Error = awserr.NewRequestFailure(
44 awserr.New(resp.Code, resp.Message, nil),
45 r.HTTPResponse.StatusCode,
46 reqID,
47 )
48 return
49 }
50
51 // Check for an unwrapped ServiceUnavailableException error
52 servUnavailResp := xmlServiceUnavailableResponse{}
53 unavailErr := xml.Unmarshal(bodyBytes, &servUnavailResp)
54 if unavailErr == nil {
55 r.Error = awserr.NewRequestFailure(
56 awserr.New("ServiceUnavailableException", "service is unavailable", nil),
57 r.HTTPResponse.StatusCode,
58 r.RequestID,
59 )
60 return
61 }
62
63 // Failed to retrieve any error message from the response body
64 r.Error = awserr.New("SerializationError",
65 "failed to decode query XML error response", decodeErr)
66}
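Callers typically recover the unmarshaled code, message, status, and request ID through a type assertion to awserr.RequestFailure. A sketch with a synthetic error like the ones built above (the request ID is a placeholder):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/awserr"
)

func describe(err error) {
	// RequestFailure exposes the service error code, message, HTTP status
	// code, and request ID set by UnmarshalError.
	if reqErr, ok := err.(awserr.RequestFailure); ok {
		fmt.Printf("%s: %s (status %d, request %s)\n",
			reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID())
		return
	}
	fmt.Println(err)
}

func main() {
	err := awserr.NewRequestFailure(
		awserr.New("ServiceUnavailableException", "service is unavailable", nil),
		503, "example-request-id")
	describe(err)
}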
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go
new file mode 100644
index 0000000..7161835
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go
@@ -0,0 +1,290 @@
1// Package rest provides RESTful serialization of AWS requests and responses.
2package rest
3
4import (
5 "bytes"
6 "encoding/base64"
7 "encoding/json"
8 "fmt"
9 "io"
10 "net/http"
11 "net/url"
12 "path"
13 "reflect"
14 "strconv"
15 "strings"
16 "time"
17
18 "github.com/aws/aws-sdk-go/aws"
19 "github.com/aws/aws-sdk-go/aws/awserr"
20 "github.com/aws/aws-sdk-go/aws/request"
21)
22
23// RFC822 is the RFC822 timestamp format layout used by AWS protocols
24const RFC822 = "Mon, 2 Jan 2006 15:04:05 GMT"
25
26// Whether the byte value can be sent without escaping in AWS URLs
27var noEscape [256]bool
28
29var errValueNotSet = fmt.Errorf("value not set")
30
31func init() {
32 for i := 0; i < len(noEscape); i++ {
33 // AWS expects every character except these to be escaped
34 noEscape[i] = (i >= 'A' && i <= 'Z') ||
35 (i >= 'a' && i <= 'z') ||
36 (i >= '0' && i <= '9') ||
37 i == '-' ||
38 i == '.' ||
39 i == '_' ||
40 i == '~'
41 }
42}
43
44// BuildHandler is a named request handler for building rest protocol requests
45var BuildHandler = request.NamedHandler{Name: "awssdk.rest.Build", Fn: Build}
46
47// Build builds the REST component of a service request.
48func Build(r *request.Request) {
49 if r.ParamsFilled() {
50 v := reflect.ValueOf(r.Params).Elem()
51 buildLocationElements(r, v, false)
52 buildBody(r, v)
53 }
54}
55
56// BuildAsGET builds the REST component of a service request with the ability to hoist
57// data from the body.
58func BuildAsGET(r *request.Request) {
59 if r.ParamsFilled() {
60 v := reflect.ValueOf(r.Params).Elem()
61 buildLocationElements(r, v, true)
62 buildBody(r, v)
63 }
64}
65
66func buildLocationElements(r *request.Request, v reflect.Value, buildGETQuery bool) {
67 query := r.HTTPRequest.URL.Query()
68
69 // Set up the raw path to match the base path pattern. This is needed
70 // so that when the path is mutated a custom escaped version can be
71 // stored in RawPath that will be used by the Go client.
72 r.HTTPRequest.URL.RawPath = r.HTTPRequest.URL.Path
73
74 for i := 0; i < v.NumField(); i++ {
75 m := v.Field(i)
76 if n := v.Type().Field(i).Name; n[0:1] == strings.ToLower(n[0:1]) {
77 continue
78 }
79
80 if m.IsValid() {
81 field := v.Type().Field(i)
82 name := field.Tag.Get("locationName")
83 if name == "" {
84 name = field.Name
85 }
86 if kind := m.Kind(); kind == reflect.Ptr {
87 m = m.Elem()
88 } else if kind == reflect.Interface {
89 if !m.Elem().IsValid() {
90 continue
91 }
92 }
93 if !m.IsValid() {
94 continue
95 }
96 if field.Tag.Get("ignore") != "" {
97 continue
98 }
99
100 var err error
101 switch field.Tag.Get("location") {
102 case "headers": // header maps
103 err = buildHeaderMap(&r.HTTPRequest.Header, m, field.Tag)
104 case "header":
105 err = buildHeader(&r.HTTPRequest.Header, m, name, field.Tag)
106 case "uri":
107 err = buildURI(r.HTTPRequest.URL, m, name, field.Tag)
108 case "querystring":
109 err = buildQueryString(query, m, name, field.Tag)
110 default:
111 if buildGETQuery {
112 err = buildQueryString(query, m, name, field.Tag)
113 }
114 }
115 r.Error = err
116 }
117 if r.Error != nil {
118 return
119 }
120 }
121
122 r.HTTPRequest.URL.RawQuery = query.Encode()
123 if !aws.BoolValue(r.Config.DisableRestProtocolURICleaning) {
124 cleanPath(r.HTTPRequest.URL)
125 }
126}
127
128func buildBody(r *request.Request, v reflect.Value) {
129 if field, ok := v.Type().FieldByName("_"); ok {
130 if payloadName := field.Tag.Get("payload"); payloadName != "" {
131 pfield, _ := v.Type().FieldByName(payloadName)
132 if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" {
133 payload := reflect.Indirect(v.FieldByName(payloadName))
134 if payload.IsValid() && payload.Interface() != nil {
135 switch reader := payload.Interface().(type) {
136 case io.ReadSeeker:
137 r.SetReaderBody(reader)
138 case []byte:
139 r.SetBufferBody(reader)
140 case string:
141 r.SetStringBody(reader)
142 default:
143 r.Error = awserr.New("SerializationError",
144 "failed to encode REST request",
145 fmt.Errorf("unknown payload type %s", payload.Type()))
146 }
147 }
148 }
149 }
150 }
151}
152
153func buildHeader(header *http.Header, v reflect.Value, name string, tag reflect.StructTag) error {
154 str, err := convertType(v, tag)
155 if err == errValueNotSet {
156 return nil
157 } else if err != nil {
158 return awserr.New("SerializationError", "failed to encode REST request", err)
159 }
160
161 header.Add(name, str)
162
163 return nil
164}
165
166func buildHeaderMap(header *http.Header, v reflect.Value, tag reflect.StructTag) error {
167 prefix := tag.Get("locationName")
168 for _, key := range v.MapKeys() {
169 str, err := convertType(v.MapIndex(key), tag)
170 if err == errValueNotSet {
171 continue
172 } else if err != nil {
173 return awserr.New("SerializationError", "failed to encode REST request", err)
174
175 }
176
177 header.Add(prefix+key.String(), str)
178 }
179 return nil
180}
181
182func buildURI(u *url.URL, v reflect.Value, name string, tag reflect.StructTag) error {
183 value, err := convertType(v, tag)
184 if err == errValueNotSet {
185 return nil
186 } else if err != nil {
187 return awserr.New("SerializationError", "failed to encode REST request", err)
188 }
189
190 u.Path = strings.Replace(u.Path, "{"+name+"}", value, -1)
191 u.Path = strings.Replace(u.Path, "{"+name+"+}", value, -1)
192
193 u.RawPath = strings.Replace(u.RawPath, "{"+name+"}", EscapePath(value, true), -1)
194 u.RawPath = strings.Replace(u.RawPath, "{"+name+"+}", EscapePath(value, false), -1)
195
196 return nil
197}
198
199func buildQueryString(query url.Values, v reflect.Value, name string, tag reflect.StructTag) error {
200 switch value := v.Interface().(type) {
201 case []*string:
202 for _, item := range value {
203 query.Add(name, *item)
204 }
205 case map[string]*string:
206 for key, item := range value {
207 query.Add(key, *item)
208 }
209 case map[string][]*string:
210 for key, items := range value {
211 for _, item := range items {
212 query.Add(key, *item)
213 }
214 }
215 default:
216 str, err := convertType(v, tag)
217 if err == errValueNotSet {
218 return nil
219 } else if err != nil {
220 return awserr.New("SerializationError", "failed to encode REST request", err)
221 }
222 query.Set(name, str)
223 }
224
225 return nil
226}
227
228func cleanPath(u *url.URL) {
229 hasSlash := strings.HasSuffix(u.Path, "/")
230
231 // clean up path, removing duplicate `/`
232 u.Path = path.Clean(u.Path)
233 u.RawPath = path.Clean(u.RawPath)
234
235 if hasSlash && !strings.HasSuffix(u.Path, "/") {
236 u.Path += "/"
237 u.RawPath += "/"
238 }
239}
240
241// EscapePath escapes part of a URL path in Amazon style
242func EscapePath(path string, encodeSep bool) string {
243 var buf bytes.Buffer
244 for i := 0; i < len(path); i++ {
245 c := path[i]
246 if noEscape[c] || (c == '/' && !encodeSep) {
247 buf.WriteByte(c)
248 } else {
249 fmt.Fprintf(&buf, "%%%02X", c)
250 }
251 }
252 return buf.String()
253}
254
255func convertType(v reflect.Value, tag reflect.StructTag) (string, error) {
256 v = reflect.Indirect(v)
257 if !v.IsValid() {
258 return "", errValueNotSet
259 }
260
261 var str string
262 switch value := v.Interface().(type) {
263 case string:
264 str = value
265 case []byte:
266 str = base64.StdEncoding.EncodeToString(value)
267 case bool:
268 str = strconv.FormatBool(value)
269 case int64:
270 str = strconv.FormatInt(value, 10)
271 case float64:
272 str = strconv.FormatFloat(value, 'f', -1, 64)
273 case time.Time:
274 str = value.UTC().Format(RFC822)
275 case aws.JSONValue:
276 b, err := json.Marshal(value)
277 if err != nil {
278 return "", err
279 }
280 if tag.Get("location") == "header" {
281 str = base64.StdEncoding.EncodeToString(b)
282 } else {
283 str = string(b)
284 }
285 default:
286 err := fmt.Errorf("Unsupported value for param %v (%s)", v.Interface(), v.Type())
287 return "", err
288 }
289 return str, nil
290}
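EscapePath percent-encodes every byte outside the unreserved set built in init; encodeSep controls whether "/" separators are encoded too, which matters for the greedy "{Key+}" URI patterns filled by buildURI. A quick sketch:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/private/protocol/rest"
)

func main() {
	// With encodeSep=false the path separators survive, matching how
	// buildURI fills "{name+}" placeholders.
	fmt.Println(rest.EscapePath("photos/2017/cat photo.jpg", false))
	// photos/2017/cat%20photo.jpg
	fmt.Println(rest.EscapePath("photos/2017/cat photo.jpg", true))
	// photos%2F2017%2Fcat%20photo.jpg
}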
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go
new file mode 100644
index 0000000..4366de2
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go
@@ -0,0 +1,45 @@
1package rest
2
3import "reflect"
4
5// PayloadMember returns the payload field member of i if there is one, or nil.
6func PayloadMember(i interface{}) interface{} {
7 if i == nil {
8 return nil
9 }
10
11 v := reflect.ValueOf(i).Elem()
12 if !v.IsValid() {
13 return nil
14 }
15 if field, ok := v.Type().FieldByName("_"); ok {
16 if payloadName := field.Tag.Get("payload"); payloadName != "" {
17 field, _ := v.Type().FieldByName(payloadName)
18 if field.Tag.Get("type") != "structure" {
19 return nil
20 }
21
22 payload := v.FieldByName(payloadName)
23 if payload.IsValid() || (payload.Kind() == reflect.Ptr && !payload.IsNil()) {
24 return payload.Interface()
25 }
26 }
27 }
28 return nil
29}
30
31// PayloadType returns the type of a payload field member of i if there is one, or "".
32func PayloadType(i interface{}) string {
33 v := reflect.Indirect(reflect.ValueOf(i))
34 if !v.IsValid() {
35 return ""
36 }
37 if field, ok := v.Type().FieldByName("_"); ok {
38 if payloadName := field.Tag.Get("payload"); payloadName != "" {
39 if member, ok := v.Type().FieldByName(payloadName); ok {
40 return member.Tag.Get("type")
41 }
42 }
43 }
44 return ""
45}
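Both helpers locate the payload through the "_" marker field's tags. A sketch with a hypothetical shape whose payload is a blob rather than a structure:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/private/protocol/rest"
)

// putThingInput is a hypothetical shape, not a real SDK type.
type putThingInput struct {
	_    struct{} `type:"structure" payload:"Body"`
	Body []byte   `type:"blob"`
}

func main() {
	in := &putThingInput{Body: []byte("hello")}
	fmt.Println(rest.PayloadType(in)) // blob
	// PayloadMember is nil here because the payload is not a structure.
	fmt.Println(rest.PayloadMember(in)) // <nil>
}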
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go
new file mode 100644
index 0000000..7a779ee
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go
@@ -0,0 +1,227 @@
1package rest
2
3import (
4 "bytes"
5 "encoding/base64"
6 "encoding/json"
7 "fmt"
8 "io"
9 "io/ioutil"
10 "net/http"
11 "reflect"
12 "strconv"
13 "strings"
14 "time"
15
16 "github.com/aws/aws-sdk-go/aws"
17 "github.com/aws/aws-sdk-go/aws/awserr"
18 "github.com/aws/aws-sdk-go/aws/request"
19)
20
21// UnmarshalHandler is a named request handler for unmarshaling rest protocol requests
22var UnmarshalHandler = request.NamedHandler{Name: "awssdk.rest.Unmarshal", Fn: Unmarshal}
23
24// UnmarshalMetaHandler is a named request handler for unmarshaling rest protocol request metadata
25var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.rest.UnmarshalMeta", Fn: UnmarshalMeta}
26
27// Unmarshal unmarshals the REST component of a response in a REST service.
28func Unmarshal(r *request.Request) {
29 if r.DataFilled() {
30 v := reflect.Indirect(reflect.ValueOf(r.Data))
31 unmarshalBody(r, v)
32 }
33}
34
35// UnmarshalMeta unmarshals the REST metadata of a response in a REST service
36func UnmarshalMeta(r *request.Request) {
37 r.RequestID = r.HTTPResponse.Header.Get("X-Amzn-Requestid")
38 if r.RequestID == "" {
39 // Alternative version of request id in the header
40 r.RequestID = r.HTTPResponse.Header.Get("X-Amz-Request-Id")
41 }
42 if r.DataFilled() {
43 v := reflect.Indirect(reflect.ValueOf(r.Data))
44 unmarshalLocationElements(r, v)
45 }
46}
47
48func unmarshalBody(r *request.Request, v reflect.Value) {
49 if field, ok := v.Type().FieldByName("_"); ok {
50 if payloadName := field.Tag.Get("payload"); payloadName != "" {
51 pfield, _ := v.Type().FieldByName(payloadName)
52 if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" {
53 payload := v.FieldByName(payloadName)
54 if payload.IsValid() {
55 switch payload.Interface().(type) {
56 case []byte:
57 defer r.HTTPResponse.Body.Close()
58 b, err := ioutil.ReadAll(r.HTTPResponse.Body)
59 if err != nil {
60 r.Error = awserr.New("SerializationError", "failed to decode REST response", err)
61 } else {
62 payload.Set(reflect.ValueOf(b))
63 }
64 case *string:
65 defer r.HTTPResponse.Body.Close()
66 b, err := ioutil.ReadAll(r.HTTPResponse.Body)
67 if err != nil {
68 r.Error = awserr.New("SerializationError", "failed to decode REST response", err)
69 } else {
70 str := string(b)
71 payload.Set(reflect.ValueOf(&str))
72 }
73 default:
74 switch payload.Type().String() {
75 case "io.ReadCloser":
76 payload.Set(reflect.ValueOf(r.HTTPResponse.Body))
77 case "io.ReadSeeker":
78 b, err := ioutil.ReadAll(r.HTTPResponse.Body)
79 if err != nil {
80 r.Error = awserr.New("SerializationError",
81 "failed to read response body", err)
82 return
83 }
84 payload.Set(reflect.ValueOf(ioutil.NopCloser(bytes.NewReader(b))))
85 default:
86 io.Copy(ioutil.Discard, r.HTTPResponse.Body)
87 defer r.HTTPResponse.Body.Close()
88 r.Error = awserr.New("SerializationError",
89 "failed to decode REST response",
90 fmt.Errorf("unknown payload type %s", payload.Type()))
91 }
92 }
93 }
94 }
95 }
96 }
97}
98
99func unmarshalLocationElements(r *request.Request, v reflect.Value) {
100 for i := 0; i < v.NumField(); i++ {
101 m, field := v.Field(i), v.Type().Field(i)
102 if n := field.Name; n[0:1] == strings.ToLower(n[0:1]) {
103 continue
104 }
105
106 if m.IsValid() {
107 name := field.Tag.Get("locationName")
108 if name == "" {
109 name = field.Name
110 }
111
112 switch field.Tag.Get("location") {
113 case "statusCode":
114 unmarshalStatusCode(m, r.HTTPResponse.StatusCode)
115 case "header":
116 err := unmarshalHeader(m, r.HTTPResponse.Header.Get(name), field.Tag)
117 if err != nil {
118 r.Error = awserr.New("SerializationError", "failed to decode REST response", err)
119 break
120 }
121 case "headers":
122 prefix := field.Tag.Get("locationName")
123 err := unmarshalHeaderMap(m, r.HTTPResponse.Header, prefix)
124 if err != nil {
125 r.Error = awserr.New("SerializationError", "failed to decode REST response", err)
126 break
127 }
128 }
129 }
130 if r.Error != nil {
131 return
132 }
133 }
134}
135
136func unmarshalStatusCode(v reflect.Value, statusCode int) {
137 if !v.IsValid() {
138 return
139 }
140
141 switch v.Interface().(type) {
142 case *int64:
143 s := int64(statusCode)
144 v.Set(reflect.ValueOf(&s))
145 }
146}
147
148func unmarshalHeaderMap(r reflect.Value, headers http.Header, prefix string) error {
149 switch r.Interface().(type) {
150 case map[string]*string: // we only support string map value types
151 out := map[string]*string{}
152 for k, v := range headers {
153 k = http.CanonicalHeaderKey(k)
154 if strings.HasPrefix(strings.ToLower(k), strings.ToLower(prefix)) {
155 out[k[len(prefix):]] = &v[0]
156 }
157 }
158 r.Set(reflect.ValueOf(out))
159 }
160 return nil
161}
162
163func unmarshalHeader(v reflect.Value, header string, tag reflect.StructTag) error {
164 isJSONValue := tag.Get("type") == "jsonvalue"
165 if isJSONValue {
166 if len(header) == 0 {
167 return nil
168 }
169 } else if !v.IsValid() || (header == "" && v.Elem().Kind() != reflect.String) {
170 return nil
171 }
172
173 switch v.Interface().(type) {
174 case *string:
175 v.Set(reflect.ValueOf(&header))
176 case []byte:
177 b, err := base64.StdEncoding.DecodeString(header)
178 if err != nil {
179 return err
180 }
181 v.Set(reflect.ValueOf(&b))
182 case *bool:
183 b, err := strconv.ParseBool(header)
184 if err != nil {
185 return err
186 }
187 v.Set(reflect.ValueOf(&b))
188 case *int64:
189 i, err := strconv.ParseInt(header, 10, 64)
190 if err != nil {
191 return err
192 }
193 v.Set(reflect.ValueOf(&i))
194 case *float64:
195 f, err := strconv.ParseFloat(header, 64)
196 if err != nil {
197 return err
198 }
199 v.Set(reflect.ValueOf(&f))
200 case *time.Time:
201 t, err := time.Parse(RFC822, header)
202 if err != nil {
203 return err
204 }
205 v.Set(reflect.ValueOf(&t))
206 case aws.JSONValue:
207 b := []byte(header)
208 var err error
209 if tag.Get("location") == "header" {
210 b, err = base64.StdEncoding.DecodeString(header)
211 if err != nil {
212 return err
213 }
214 }
215
216 m := aws.JSONValue{}
217 err = json.Unmarshal(b, &m)
218 if err != nil {
219 return err
220 }
221 v.Set(reflect.ValueOf(m))
222 default:
223 err := fmt.Errorf("Unsupported value for param %v (%s)", v.Interface(), v.Type())
224 return err
225 }
226 return nil
227}
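unmarshalHeader parses header timestamps with the RFC822 layout declared in build.go. A quick standalone check of that layout:

package main

import (
	"fmt"
	"time"
)

func main() {
	const RFC822 = "Mon, 2 Jan 2006 15:04:05 GMT" // same layout as rest.RFC822
	t, err := time.Parse(RFC822, "Wed, 21 Oct 2015 07:28:00 GMT")
	if err != nil {
		panic(err)
	}
	fmt.Println(t.UTC()) // 2015-10-21 07:28:00 +0000 UTC
}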
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go
new file mode 100644
index 0000000..7bdf4c8
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go
@@ -0,0 +1,69 @@
1// Package restxml provides RESTful XML serialization of AWS
2// requests and responses.
3package restxml
4
5//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/rest-xml.json build_test.go
6//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/rest-xml.json unmarshal_test.go
7
8import (
9 "bytes"
10 "encoding/xml"
11
12 "github.com/aws/aws-sdk-go/aws/awserr"
13 "github.com/aws/aws-sdk-go/aws/request"
14 "github.com/aws/aws-sdk-go/private/protocol/query"
15 "github.com/aws/aws-sdk-go/private/protocol/rest"
16 "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
17)
18
19// BuildHandler is a named request handler for building restxml protocol requests
20var BuildHandler = request.NamedHandler{Name: "awssdk.restxml.Build", Fn: Build}
21
22// UnmarshalHandler is a named request handler for unmarshaling restxml protocol requests
23var UnmarshalHandler = request.NamedHandler{Name: "awssdk.restxml.Unmarshal", Fn: Unmarshal}
24
25// UnmarshalMetaHandler is a named request handler for unmarshaling restxml protocol request metadata
26var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.restxml.UnmarshalMeta", Fn: UnmarshalMeta}
27
28// UnmarshalErrorHandler is a named request handler for unmarshaling restxml protocol request errors
29var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.restxml.UnmarshalError", Fn: UnmarshalError}
30
31// Build builds a request payload for the REST XML protocol.
32func Build(r *request.Request) {
33 rest.Build(r)
34
35 if t := rest.PayloadType(r.Params); t == "structure" || t == "" {
36 var buf bytes.Buffer
37 err := xmlutil.BuildXML(r.Params, xml.NewEncoder(&buf))
38 if err != nil {
39 r.Error = awserr.New("SerializationError", "failed to encode rest XML request", err)
40 return
41 }
42 r.SetBufferBody(buf.Bytes())
43 }
44}
45
46// Unmarshal unmarshals a payload response for the REST XML protocol.
47func Unmarshal(r *request.Request) {
48 if t := rest.PayloadType(r.Data); t == "structure" || t == "" {
49 defer r.HTTPResponse.Body.Close()
50 decoder := xml.NewDecoder(r.HTTPResponse.Body)
51 err := xmlutil.UnmarshalXML(r.Data, decoder, "")
52 if err != nil {
53 r.Error = awserr.New("SerializationError", "failed to decode REST XML response", err)
54 return
55 }
56 } else {
57 rest.Unmarshal(r)
58 }
59}
60
61// UnmarshalMeta unmarshals response headers for the REST XML protocol.
62func UnmarshalMeta(r *request.Request) {
63 rest.UnmarshalMeta(r)
64}
65
66// UnmarshalError unmarshals a response error for the REST XML protocol.
67func UnmarshalError(r *request.Request) {
68 query.UnmarshalError(r)
69}
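Generated clients attach these four handlers to their handler lists at construction time; the S3 client later in this diff is wired this way. A sketch of that registration, assuming only the request.Handlers API vendored earlier in this change:

package main

import (
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/private/protocol/restxml"
)

// wireRESTXML registers the restxml handlers on a handler list the way a
// generated REST-XML service client does.
func wireRESTXML(h *request.Handlers) {
	h.Build.PushBackNamed(restxml.BuildHandler)
	h.Unmarshal.PushBackNamed(restxml.UnmarshalHandler)
	h.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler)
	h.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler)
}

func main() {
	var h request.Handlers
	wireRESTXML(&h)
}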
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go
new file mode 100644
index 0000000..da1a681
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go
@@ -0,0 +1,21 @@
1package protocol
2
3import (
4 "io"
5 "io/ioutil"
6
7 "github.com/aws/aws-sdk-go/aws/request"
8)
9
10// UnmarshalDiscardBodyHandler is a named request handler to empty and close a response's body
11var UnmarshalDiscardBodyHandler = request.NamedHandler{Name: "awssdk.shared.UnmarshalDiscardBody", Fn: UnmarshalDiscardBody}
12
13// UnmarshalDiscardBody is a request handler to empty a response's body and close it.
14func UnmarshalDiscardBody(r *request.Request) {
15 if r.HTTPResponse == nil || r.HTTPResponse.Body == nil {
16 return
17 }
18
19 io.Copy(ioutil.Discard, r.HTTPResponse.Body)
20 r.HTTPResponse.Body.Close()
21}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go
new file mode 100644
index 0000000..7091b45
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go
@@ -0,0 +1,296 @@
1// Package xmlutil provides XML serialization of AWS requests and responses.
2package xmlutil
3
4import (
5 "encoding/base64"
6 "encoding/xml"
7 "fmt"
8 "reflect"
9 "sort"
10 "strconv"
11 "time"
12
13 "github.com/aws/aws-sdk-go/private/protocol"
14)
15
16// BuildXML will serialize params into an xml.Encoder.
17// Error will be returned if the serialization of any of the params or nested values fails.
18func BuildXML(params interface{}, e *xml.Encoder) error {
19 b := xmlBuilder{encoder: e, namespaces: map[string]string{}}
20 root := NewXMLElement(xml.Name{})
21 if err := b.buildValue(reflect.ValueOf(params), root, ""); err != nil {
22 return err
23 }
24 for _, c := range root.Children {
25 for _, v := range c {
26 return StructToXML(e, v, false)
27 }
28 }
29 return nil
30}
31
32// Returns the reflection element of a value, if it is a pointer.
33func elemOf(value reflect.Value) reflect.Value {
34 for value.Kind() == reflect.Ptr {
35 value = value.Elem()
36 }
37 return value
38}
39
40// An xmlBuilder serializes values from Go code to XML
41type xmlBuilder struct {
42 encoder *xml.Encoder
43 namespaces map[string]string
44}
45
46// buildValue is a generic XMLNode builder for any type. It will build the value
47// according to its specific type: struct, list, map, or scalar.
48//
49// Also takes a "type" tag value to set what type a value should be converted to as an
50// XMLNode. If the type is not provided, reflection will be used to determine the value's type.
51func (b *xmlBuilder) buildValue(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
52 value = elemOf(value)
53 if !value.IsValid() { // no need to handle zero values
54 return nil
55 } else if tag.Get("location") != "" { // don't handle non-body location values
56 return nil
57 }
58
59 t := tag.Get("type")
60 if t == "" {
61 switch value.Kind() {
62 case reflect.Struct:
63 t = "structure"
64 case reflect.Slice:
65 t = "list"
66 case reflect.Map:
67 t = "map"
68 }
69 }
70
71 switch t {
72 case "structure":
73 if field, ok := value.Type().FieldByName("_"); ok {
74 tag = tag + reflect.StructTag(" ") + field.Tag
75 }
76 return b.buildStruct(value, current, tag)
77 case "list":
78 return b.buildList(value, current, tag)
79 case "map":
80 return b.buildMap(value, current, tag)
81 default:
82 return b.buildScalar(value, current, tag)
83 }
84}
85
86// buildStruct adds a struct and its fields to the current XMLNode. All fields and any nested
87// types are converted to XMLNodes also.
88func (b *xmlBuilder) buildStruct(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
89 if !value.IsValid() {
90 return nil
91 }
92
93 fieldAdded := false
94
95 // unwrap payloads
96 if payload := tag.Get("payload"); payload != "" {
97 field, _ := value.Type().FieldByName(payload)
98 tag = field.Tag
99 value = elemOf(value.FieldByName(payload))
100
101 if !value.IsValid() {
102 return nil
103 }
104 }
105
106 child := NewXMLElement(xml.Name{Local: tag.Get("locationName")})
107
108 // there is an xmlNamespace associated with this struct
109 if prefix, uri := tag.Get("xmlPrefix"), tag.Get("xmlURI"); uri != "" {
110 ns := xml.Attr{
111 Name: xml.Name{Local: "xmlns"},
112 Value: uri,
113 }
114 if prefix != "" {
115 b.namespaces[prefix] = uri // register the namespace
116 ns.Name.Local = "xmlns:" + prefix
117 }
118
119 child.Attr = append(child.Attr, ns)
120 }
121
122 t := value.Type()
123 for i := 0; i < value.NumField(); i++ {
124 member := elemOf(value.Field(i))
125 field := t.Field(i)
126
127 if field.PkgPath != "" {
128 continue // ignore unexported fields
129 }
130 if field.Tag.Get("ignore") != "" {
131 continue
132 }
133
134 mTag := field.Tag
135 if mTag.Get("location") != "" { // skip non-body members
136 continue
137 }
138
139 if protocol.CanSetIdempotencyToken(value.Field(i), field) {
140 token := protocol.GetIdempotencyToken()
141 member = reflect.ValueOf(token)
142 }
143
144 memberName := mTag.Get("locationName")
145 if memberName == "" {
146 memberName = field.Name
147 mTag = reflect.StructTag(string(mTag) + ` locationName:"` + memberName + `"`)
148 }
149 if err := b.buildValue(member, child, mTag); err != nil {
150 return err
151 }
152
153 fieldAdded = true
154 }
155
156 if fieldAdded { // only append this child if we have one or more valid members
157 current.AddChild(child)
158 }
159
160 return nil
161}
162
163// buildList adds the value's list items to the current XMLNode as children nodes. All
164// nested values in the list are converted to XMLNodes also.
165func (b *xmlBuilder) buildList(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
166 if value.IsNil() { // don't build omitted lists
167 return nil
168 }
169
170 // check for unflattened list member
171 flattened := tag.Get("flattened") != ""
172
173 xname := xml.Name{Local: tag.Get("locationName")}
174 if flattened {
175 for i := 0; i < value.Len(); i++ {
176 child := NewXMLElement(xname)
177 current.AddChild(child)
178 if err := b.buildValue(value.Index(i), child, ""); err != nil {
179 return err
180 }
181 }
182 } else {
183 list := NewXMLElement(xname)
184 current.AddChild(list)
185
186 for i := 0; i < value.Len(); i++ {
187 iname := tag.Get("locationNameList")
188 if iname == "" {
189 iname = "member"
190 }
191
192 child := NewXMLElement(xml.Name{Local: iname})
193 list.AddChild(child)
194 if err := b.buildValue(value.Index(i), child, ""); err != nil {
195 return err
196 }
197 }
198 }
199
200 return nil
201}
202
203// buildMap adds the value's key/value pairs to the current XMLNode as children nodes. All
204// nested values in the map are converted to XMLNodes also.
205//
206// Error will be returned if it is unable to build the map's values into XMLNodes
207func (b *xmlBuilder) buildMap(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
208 if value.IsNil() { // don't build omitted maps
209 return nil
210 }
211
212 maproot := NewXMLElement(xml.Name{Local: tag.Get("locationName")})
213 current.AddChild(maproot)
214 current = maproot
215
216 kname, vname := "key", "value"
217 if n := tag.Get("locationNameKey"); n != "" {
218 kname = n
219 }
220 if n := tag.Get("locationNameValue"); n != "" {
221 vname = n
222 }
223
224 // sorting is not required for compliance, but it makes testing easier
225 keys := make([]string, value.Len())
226 for i, k := range value.MapKeys() {
227 keys[i] = k.String()
228 }
229 sort.Strings(keys)
230
231 for _, k := range keys {
232 v := value.MapIndex(reflect.ValueOf(k))
233
234 mapcur := current
235 if tag.Get("flattened") == "" { // add "entry" tag to non-flat maps
236 child := NewXMLElement(xml.Name{Local: "entry"})
237 mapcur.AddChild(child)
238 mapcur = child
239 }
240
241 kchild := NewXMLElement(xml.Name{Local: kname})
242 kchild.Text = k
243 vchild := NewXMLElement(xml.Name{Local: vname})
244 mapcur.AddChild(kchild)
245 mapcur.AddChild(vchild)
246
247 if err := b.buildValue(v, vchild, ""); err != nil {
248 return err
249 }
250 }
251
252 return nil
253}
254
255// buildScalar will convert the value into a string and append it as an attribute or child
256// of the current XMLNode.
257//
258// The value will be added as an attribute if the tag contains an "xmlAttribute" attribute value.
259//
260// Error will be returned if the value type is unsupported.
261func (b *xmlBuilder) buildScalar(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
262 var str string
263 switch converted := value.Interface().(type) {
264 case string:
265 str = converted
266 case []byte:
267 if !value.IsNil() {
268 str = base64.StdEncoding.EncodeToString(converted)
269 }
270 case bool:
271 str = strconv.FormatBool(converted)
272 case int64:
273 str = strconv.FormatInt(converted, 10)
274 case int:
275 str = strconv.Itoa(converted)
276 case float64:
277 str = strconv.FormatFloat(converted, 'f', -1, 64)
278 case float32:
279 str = strconv.FormatFloat(float64(converted), 'f', -1, 32)
280 case time.Time:
281 const ISO8601UTC = "2006-01-02T15:04:05Z"
282 str = converted.UTC().Format(ISO8601UTC)
283 default:
284 return fmt.Errorf("unsupported value for param %s: %v (%s)",
285 tag.Get("locationName"), value.Interface(), value.Type().Name())
286 }
287
288 xname := xml.Name{Local: tag.Get("locationName")}
289 if tag.Get("xmlAttribute") != "" { // put into current node's attribute list
290 attr := xml.Attr{Name: xname, Value: str}
291 current.Attr = append(current.Attr, attr)
292 } else { // regular text node
293 current.AddChild(&XMLNode{Name: xname, Text: str})
294 }
295 return nil
296}
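BuildXML walks a tagged value and emits the resulting node tree through StructToXML. A sketch with a hypothetical shape using the list tags handled by buildList; the expected output, assuming the tags are honored as above, is a single <Tagging> document:

package main

import (
	"bytes"
	"encoding/xml"
	"fmt"

	"github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
)

// tagging is a hypothetical shape, tagged like the SDK's generated types.
type tagging struct {
	_    struct{}  `type:"structure" locationName:"Tagging"`
	Keys []*string `locationName:"TagSet" locationNameList:"Tag" type:"list"`
}

func main() {
	k := "env"
	var buf bytes.Buffer
	if err := xmlutil.BuildXML(&tagging{Keys: []*string{&k}}, xml.NewEncoder(&buf)); err != nil {
		panic(err)
	}
	fmt.Println(buf.String())
	// <Tagging><TagSet><Tag>env</Tag></TagSet></Tagging>
}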
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go
new file mode 100644
index 0000000..8758462
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go
@@ -0,0 +1,260 @@
1package xmlutil
2
3import (
4 "encoding/base64"
5 "encoding/xml"
6 "fmt"
7 "io"
8 "reflect"
9 "strconv"
10 "strings"
11 "time"
12)
13
14// UnmarshalXML deserializes an xml.Decoder into the container v, which
15// needs to match the shape of the XML expected to be decoded.
16// If the shape doesn't match, unmarshaling will fail.
17func UnmarshalXML(v interface{}, d *xml.Decoder, wrapper string) error {
18 n, err := XMLToStruct(d, nil)
19 if err != nil {
20 return err
21 }
22 if n.Children != nil {
23 for _, root := range n.Children {
24 for _, c := range root {
25 if wrappedChild, ok := c.Children[wrapper]; ok {
26 c = wrappedChild[0] // pull out wrapped element
27 }
28
29 err = parse(reflect.ValueOf(v), c, "")
30 if err != nil {
31 if err == io.EOF {
32 return nil
33 }
34 return err
35 }
36 }
37 }
38 return nil
39 }
40 return nil
41}
42
43// parse deserializes any value from the XMLNode. The type tag is used to infer the type, or
44// reflection will be used to determine the type from r.
45func parse(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
46 rtype := r.Type()
47 if rtype.Kind() == reflect.Ptr {
48 rtype = rtype.Elem() // check kind of actual element type
49 }
50
51 t := tag.Get("type")
52 if t == "" {
53 switch rtype.Kind() {
54 case reflect.Struct:
55 t = "structure"
56 case reflect.Slice:
57 t = "list"
58 case reflect.Map:
59 t = "map"
60 }
61 }
62
63 switch t {
64 case "structure":
65 if field, ok := rtype.FieldByName("_"); ok {
66 tag = field.Tag
67 }
68 return parseStruct(r, node, tag)
69 case "list":
70 return parseList(r, node, tag)
71 case "map":
72 return parseMap(r, node, tag)
73 default:
74 return parseScalar(r, node, tag)
75 }
76}
77
78// parseStruct deserializes a structure and its fields from an XMLNode. Any nested
79// types in the structure will also be deserialized.
80func parseStruct(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
81 t := r.Type()
82 if r.Kind() == reflect.Ptr {
83 if r.IsNil() { // create the structure if it's nil
84 s := reflect.New(r.Type().Elem())
85 r.Set(s)
86 r = s
87 }
88
89 r = r.Elem()
90 t = t.Elem()
91 }
92
93 // unwrap any payloads
94 if payload := tag.Get("payload"); payload != "" {
95 field, _ := t.FieldByName(payload)
96 return parseStruct(r.FieldByName(payload), node, field.Tag)
97 }
98
99 for i := 0; i < t.NumField(); i++ {
100 field := t.Field(i)
101 if c := field.Name[0:1]; strings.ToLower(c) == c {
102 continue // ignore unexported fields
103 }
104
105 // figure out what this field is called
106 name := field.Name
107 if field.Tag.Get("flattened") != "" && field.Tag.Get("locationNameList") != "" {
108 name = field.Tag.Get("locationNameList")
109 } else if locName := field.Tag.Get("locationName"); locName != "" {
110 name = locName
111 }
112
113 // try to find the field by name in elements
114 elems := node.Children[name]
115
116 if elems == nil { // try to find the field in attributes
117 if val, ok := node.findElem(name); ok {
118 elems = []*XMLNode{{Text: val}}
119 }
120 }
121
122 member := r.FieldByName(field.Name)
123 for _, elem := range elems {
124 err := parse(member, elem, field.Tag)
125 if err != nil {
126 return err
127 }
128 }
129 }
130 return nil
131}
132
133// parseList deserializes a list of values from an XML node. Each list entry
134// will also be deserialized.
135func parseList(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
136 t := r.Type()
137
138 if tag.Get("flattened") == "" { // look at all item entries
139 mname := "member"
140 if name := tag.Get("locationNameList"); name != "" {
141 mname = name
142 }
143
144 if Children, ok := node.Children[mname]; ok {
145 if r.IsNil() {
146 r.Set(reflect.MakeSlice(t, len(Children), len(Children)))
147 }
148
149 for i, c := range Children {
150 err := parse(r.Index(i), c, "")
151 if err != nil {
152 return err
153 }
154 }
155 }
156 } else { // flattened list means this is a single element
157 if r.IsNil() {
158 r.Set(reflect.MakeSlice(t, 0, 0))
159 }
160
161 childR := reflect.Zero(t.Elem())
162 r.Set(reflect.Append(r, childR))
163 err := parse(r.Index(r.Len()-1), node, "")
164 if err != nil {
165 return err
166 }
167 }
168
169 return nil
170}
171
172// parseMap deserializes a map from an XMLNode. The direct children of the XMLNode
173// will also be deserialized as map entries.
174func parseMap(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
175 if r.IsNil() {
176 r.Set(reflect.MakeMap(r.Type()))
177 }
178
179 if tag.Get("flattened") == "" { // look at all child entries
180 for _, entry := range node.Children["entry"] {
181 parseMapEntry(r, entry, tag)
182 }
183 } else { // this element is itself an entry
184 parseMapEntry(r, node, tag)
185 }
186
187 return nil
188}
189
190// parseMapEntry deserializes a map entry from an XML node.
191func parseMapEntry(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
192 kname, vname := "key", "value"
193 if n := tag.Get("locationNameKey"); n != "" {
194 kname = n
195 }
196 if n := tag.Get("locationNameValue"); n != "" {
197 vname = n
198 }
199
200 keys, ok := node.Children[kname]
201 values := node.Children[vname]
202 if ok {
203 for i, key := range keys {
204 keyR := reflect.ValueOf(key.Text)
205 value := values[i]
206 valueR := reflect.New(r.Type().Elem()).Elem()
207
208 parse(valueR, value, "")
209 r.SetMapIndex(keyR, valueR)
210 }
211 }
212 return nil
213}
214
215// parseScalar deserializes an XMLNode value into a concrete type based on the
216// interface type of r.
217//
218// Error is returned if the deserialization fails due to invalid type conversion,
219// or unsupported interface type.
220func parseScalar(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
221 switch r.Interface().(type) {
222 case *string:
223 r.Set(reflect.ValueOf(&node.Text))
224 return nil
225 case []byte:
226 b, err := base64.StdEncoding.DecodeString(node.Text)
227 if err != nil {
228 return err
229 }
230 r.Set(reflect.ValueOf(b))
231 case *bool:
232 v, err := strconv.ParseBool(node.Text)
233 if err != nil {
234 return err
235 }
236 r.Set(reflect.ValueOf(&v))
237 case *int64:
238 v, err := strconv.ParseInt(node.Text, 10, 64)
239 if err != nil {
240 return err
241 }
242 r.Set(reflect.ValueOf(&v))
243 case *float64:
244 v, err := strconv.ParseFloat(node.Text, 64)
245 if err != nil {
246 return err
247 }
248 r.Set(reflect.ValueOf(&v))
249 case *time.Time:
250 const ISO8601UTC = "2006-01-02T15:04:05Z"
251 t, err := time.Parse(ISO8601UTC, node.Text)
252 if err != nil {
253 return err
254 }
255 r.Set(reflect.ValueOf(&t))
256 default:
257 return fmt.Errorf("unsupported value: %v (%s)", r.Interface(), r.Type())
258 }
259 return nil
260}
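parseList resolves unflattened members through the locationNameList tag. A sketch decoding the same hypothetical <Tagging> document built in the xmlutil build example above:

package main

import (
	"encoding/xml"
	"fmt"
	"strings"

	"github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
)

// tagSetOutput is a hypothetical shape, not a real SDK type.
type tagSetOutput struct {
	_    struct{}  `type:"structure"`
	Keys []*string `locationName:"TagSet" locationNameList:"Tag" type:"list"`
}

func main() {
	body := `<Tagging><TagSet><Tag>env</Tag><Tag>team</Tag></TagSet></Tagging>`
	out := &tagSetOutput{}
	if err := xmlutil.UnmarshalXML(out, xml.NewDecoder(strings.NewReader(body)), ""); err != nil {
		panic(err)
	}
	for _, k := range out.Keys {
		fmt.Println(*k) // env, then team
	}
}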
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go
new file mode 100644
index 0000000..3e970b6
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go
@@ -0,0 +1,147 @@
1package xmlutil
2
3import (
4 "encoding/xml"
5 "fmt"
6 "io"
7 "sort"
8)
9
10// An XMLNode contains the values to be encoded or decoded.
11type XMLNode struct {
12 Name xml.Name `json:",omitempty"`
13 Children map[string][]*XMLNode `json:",omitempty"`
14 Text string `json:",omitempty"`
15 Attr []xml.Attr `json:",omitempty"`
16
17 namespaces map[string]string
18 parent *XMLNode
19}
20
21// NewXMLElement returns a pointer to a new XMLNode initialized to default values.
22func NewXMLElement(name xml.Name) *XMLNode {
23 return &XMLNode{
24 Name: name,
25 Children: map[string][]*XMLNode{},
26 Attr: []xml.Attr{},
27 }
28}
29
30// AddChild adds child to the XMLNode.
31func (n *XMLNode) AddChild(child *XMLNode) {
32 if _, ok := n.Children[child.Name.Local]; !ok {
33 n.Children[child.Name.Local] = []*XMLNode{}
34 }
35 n.Children[child.Name.Local] = append(n.Children[child.Name.Local], child)
36}
37
38// XMLToStruct converts an xml.Decoder stream to an XMLNode with nested values.
39func XMLToStruct(d *xml.Decoder, s *xml.StartElement) (*XMLNode, error) {
40 out := &XMLNode{}
41 for {
42 tok, err := d.Token()
43 if err != nil {
44 if err == io.EOF {
45 break
46 } else {
47 return out, err
48 }
49 }
50
51 if tok == nil {
52 break
53 }
54
55 switch typed := tok.(type) {
56 case xml.CharData:
57 out.Text = string(typed.Copy())
58 case xml.StartElement:
59 el := typed.Copy()
60 out.Attr = el.Attr
61 if out.Children == nil {
62 out.Children = map[string][]*XMLNode{}
63 }
64
65 name := typed.Name.Local
66 slice := out.Children[name]
67 if slice == nil {
68 slice = []*XMLNode{}
69 }
70 node, e := XMLToStruct(d, &el)
71 out.findNamespaces()
72 if e != nil {
73 return out, e
74 }
75 node.Name = typed.Name
76 node.findNamespaces()
77 tempOut := *out
78 // Save into a temp variable, simply because out gets squashed during
79 // loop iterations
80 node.parent = &tempOut
81 slice = append(slice, node)
82 out.Children[name] = slice
83 case xml.EndElement:
84 if s != nil && s.Name.Local == typed.Name.Local { // matching end token
85 return out, nil
86 }
87 out = &XMLNode{}
88 }
89 }
90 return out, nil
91}
92
93func (n *XMLNode) findNamespaces() {
94 ns := map[string]string{}
95 for _, a := range n.Attr {
96 if a.Name.Space == "xmlns" {
97 ns[a.Value] = a.Name.Local
98 }
99 }
100
101 n.namespaces = ns
102}
103
104func (n *XMLNode) findElem(name string) (string, bool) {
105 for node := n; node != nil; node = node.parent {
106 for _, a := range node.Attr {
107 namespace := a.Name.Space
108 if v, ok := node.namespaces[namespace]; ok {
109 namespace = v
110 }
111 if name == fmt.Sprintf("%s:%s", namespace, a.Name.Local) {
112 return a.Value, true
113 }
114 }
115 }
116 return "", false
117}
118
119// StructToXML writes an XMLNode to an xml.Encoder as tokens.
120func StructToXML(e *xml.Encoder, node *XMLNode, sorted bool) error {
121 e.EncodeToken(xml.StartElement{Name: node.Name, Attr: node.Attr})
122
123 if node.Text != "" {
124 e.EncodeToken(xml.CharData([]byte(node.Text)))
125 } else if sorted {
126 sortedNames := []string{}
127 for k := range node.Children {
128 sortedNames = append(sortedNames, k)
129 }
130 sort.Strings(sortedNames)
131
132 for _, k := range sortedNames {
133 for _, v := range node.Children[k] {
134 StructToXML(e, v, sorted)
135 }
136 }
137 } else {
138 for _, c := range node.Children {
139 for _, v := range c {
140 StructToXML(e, v, sorted)
141 }
142 }
143 }
144
145 e.EncodeToken(xml.EndElement{Name: node.Name})
146 return e.Flush()
147}
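XMLToStruct produces a plain tree of XMLNodes, so intermediate results can be inspected directly. A sketch that parses a small arbitrary document and dumps the tree via the JSON tags declared on XMLNode:

package main

import (
	"encoding/json"
	"encoding/xml"
	"fmt"
	"strings"

	"github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
)

func main() {
	doc := `<Person><Age>30</Age></Person>` // arbitrary sample document
	n, err := xmlutil.XMLToStruct(xml.NewDecoder(strings.NewReader(doc)), nil)
	if err != nil {
		panic(err)
	}
	b, _ := json.MarshalIndent(n, "", "  ")
	fmt.Println(string(b)) // the root node, with Children["Person"] nested
}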
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go
new file mode 100644
index 0000000..52ac02c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go
@@ -0,0 +1,19245 @@
1// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
2
3package s3
4
5import (
6 "fmt"
7 "io"
8 "time"
9
10 "github.com/aws/aws-sdk-go/aws"
11 "github.com/aws/aws-sdk-go/aws/awsutil"
12 "github.com/aws/aws-sdk-go/aws/request"
13 "github.com/aws/aws-sdk-go/private/protocol"
14 "github.com/aws/aws-sdk-go/private/protocol/restxml"
15)
16
17const opAbortMultipartUpload = "AbortMultipartUpload"
18
19// AbortMultipartUploadRequest generates a "aws/request.Request" representing the
20// client's request for the AbortMultipartUpload operation. The "output" return
21// value can be used to capture response data after the request's "Send" method
22// is called.
23//
24// See AbortMultipartUpload for usage and error information.
25//
26// Creating a request object using this method should be used when you want to inject
27// custom logic into the request's lifecycle using a custom handler, or if you want to
28// access properties on the request object before or after sending the request. If
29// you just want the service response, call the AbortMultipartUpload method directly
30// instead.
31//
32// Note: You must call the "Send" method on the returned request object in order
33// to execute the request.
34//
35// // Example sending a request using the AbortMultipartUploadRequest method.
36// req, resp := client.AbortMultipartUploadRequest(params)
37//
38// err := req.Send()
39// if err == nil { // resp is now filled
40// fmt.Println(resp)
41// }
42//
43// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortMultipartUpload
44func (c *S3) AbortMultipartUploadRequest(input *AbortMultipartUploadInput) (req *request.Request, output *AbortMultipartUploadOutput) {
45 op := &request.Operation{
46 Name: opAbortMultipartUpload,
47 HTTPMethod: "DELETE",
48 HTTPPath: "/{Bucket}/{Key+}",
49 }
50
51 if input == nil {
52 input = &AbortMultipartUploadInput{}
53 }
54
55 output = &AbortMultipartUploadOutput{}
56 req = c.newRequest(op, input, output)
57 return
58}
59
60// AbortMultipartUpload API operation for Amazon Simple Storage Service.
61//
62// Aborts a multipart upload.
63//
64// To verify that all parts have been removed, so you don't get charged for
65// the part storage, you should call the List Parts operation and ensure the
66// parts list is empty.
67//
68// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
69// with awserr.Error's Code and Message methods to get detailed information about
70// the error.
71//
72// See the AWS API reference guide for Amazon Simple Storage Service's
73// API operation AbortMultipartUpload for usage and error information.
74//
75// Returned Error Codes:
76// * ErrCodeNoSuchUpload "NoSuchUpload"
77// The specified multipart upload does not exist.
78//
79// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortMultipartUpload
80func (c *S3) AbortMultipartUpload(input *AbortMultipartUploadInput) (*AbortMultipartUploadOutput, error) {
81 req, out := c.AbortMultipartUploadRequest(input)
82 return out, req.Send()
83}
84
85// AbortMultipartUploadWithContext is the same as AbortMultipartUpload with the addition of
86// the ability to pass a context and additional request options.
87//
88// See AbortMultipartUpload for details on how to use this API operation.
89//
90// The context must be non-nil and will be used for request cancellation. If
91// the context is nil a panic will occur. In the future the SDK may create
92// sub-contexts for http.Requests. See https://golang.org/pkg/context/
93// for more information on using Contexts.
94func (c *S3) AbortMultipartUploadWithContext(ctx aws.Context, input *AbortMultipartUploadInput, opts ...request.Option) (*AbortMultipartUploadOutput, error) {
95 req, out := c.AbortMultipartUploadRequest(input)
96 req.SetContext(ctx)
97 req.ApplyOptions(opts...)
98 return out, req.Send()
99}
100
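The generated comment above shows the request/Send pattern; the WithContext variant accepts a standard context for cancellation. A sketch of aborting a multipart upload with a timeout, assuming the aws/session package vendored elsewhere in this change (bucket, key, and upload ID are placeholders):

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// context.Context satisfies aws.Context, so a plain timeout works here.
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	_, err := svc.AbortMultipartUploadWithContext(ctx, &s3.AbortMultipartUploadInput{
		Bucket:   aws.String("example-bucket"),
		Key:      aws.String("example-key"),
		UploadId: aws.String("example-upload-id"),
	})
	if err != nil {
		fmt.Println(err)
	}
}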
101const opCompleteMultipartUpload = "CompleteMultipartUpload"
102
103// CompleteMultipartUploadRequest generates a "aws/request.Request" representing the
104// client's request for the CompleteMultipartUpload operation. The "output" return
105// value can be used to capture response data after the request's "Send" method
106// is called.
107//
108// See CompleteMultipartUpload for usage and error information.
109//
110// Creating a request object using this method should be used when you want to inject
111// custom logic into the request's lifecycle using a custom handler, or if you want to
112// access properties on the request object before or after sending the request. If
113// you just want the service response, call the CompleteMultipartUpload method directly
114// instead.
115//
116// Note: You must call the "Send" method on the returned request object in order
117// to execute the request.
118//
119// // Example sending a request using the CompleteMultipartUploadRequest method.
120// req, resp := client.CompleteMultipartUploadRequest(params)
121//
122// err := req.Send()
123// if err == nil { // resp is now filled
124// fmt.Println(resp)
125// }
126//
127// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompleteMultipartUpload
128func (c *S3) CompleteMultipartUploadRequest(input *CompleteMultipartUploadInput) (req *request.Request, output *CompleteMultipartUploadOutput) {
129 op := &request.Operation{
130 Name: opCompleteMultipartUpload,
131 HTTPMethod: "POST",
132 HTTPPath: "/{Bucket}/{Key+}",
133 }
134
135 if input == nil {
136 input = &CompleteMultipartUploadInput{}
137 }
138
139 output = &CompleteMultipartUploadOutput{}
140 req = c.newRequest(op, input, output)
141 return
142}
143
144// CompleteMultipartUpload API operation for Amazon Simple Storage Service.
145//
146// Completes a multipart upload by assembling previously uploaded parts.
147//
148// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
149// with awserr.Error's Code and Message methods to get detailed information about
150// the error.
151//
152// See the AWS API reference guide for Amazon Simple Storage Service's
153// API operation CompleteMultipartUpload for usage and error information.
154// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompleteMultipartUpload
155func (c *S3) CompleteMultipartUpload(input *CompleteMultipartUploadInput) (*CompleteMultipartUploadOutput, error) {
156 req, out := c.CompleteMultipartUploadRequest(input)
157 return out, req.Send()
158}
159
160// CompleteMultipartUploadWithContext is the same as CompleteMultipartUpload with the addition of
161// the ability to pass a context and additional request options.
162//
163// See CompleteMultipartUpload for details on how to use this API operation.
164//
165// The context must be non-nil and will be used for request cancellation. If
166// the context is nil a panic will occur. In the future the SDK may create
167// sub-contexts for http.Requests. See https://golang.org/pkg/context/
168// for more information on using Contexts.
169func (c *S3) CompleteMultipartUploadWithContext(ctx aws.Context, input *CompleteMultipartUploadInput, opts ...request.Option) (*CompleteMultipartUploadOutput, error) {
170 req, out := c.CompleteMultipartUploadRequest(input)
171 req.SetContext(ctx)
172 req.ApplyOptions(opts...)
173 return out, req.Send()
174}
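
// A hedged, commented sketch (not part of the generated API): completing an
// upload requires echoing back each part's ETag and part number. `svc`, the
// names, and the collected `parts` slice are assumed placeholders.
//
//    // parts was accumulated from UploadPart responses, e.g.:
//    //   parts = append(parts, &CompletedPart{ETag: resp.ETag, PartNumber: aws.Int64(n)})
//    out, err := svc.CompleteMultipartUpload(&CompleteMultipartUploadInput{
//        Bucket:          aws.String("example-bucket"),
//        Key:             aws.String("example-key"),
//        UploadId:        aws.String("example-upload"),
//        MultipartUpload: &CompletedMultipartUpload{Parts: parts},
//    })
//    if err == nil {
//        fmt.Println(aws.StringValue(out.Location)) // URL of the assembled object
//    }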

const opCopyObject = "CopyObject"

// CopyObjectRequest generates an "aws/request.Request" representing the
// client's request for the CopyObject operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// See CopyObject for usage and error information.
//
// Use this method when you want to inject custom logic into the request's
// lifecycle via a custom handler, or when you need to access properties on the
// request object before or after sending the request. If you just want the
// service response, call the CopyObject method directly instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
//    // Example sending a request using the CopyObjectRequest method.
//    req, resp := client.CopyObjectRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObject
func (c *S3) CopyObjectRequest(input *CopyObjectInput) (req *request.Request, output *CopyObjectOutput) {
	op := &request.Operation{
		Name:       opCopyObject,
		HTTPMethod: "PUT",
		HTTPPath:   "/{Bucket}/{Key+}",
	}

	if input == nil {
		input = &CopyObjectInput{}
	}

	output = &CopyObjectOutput{}
	req = c.newRequest(op, input, output)
	return
}

// CopyObject API operation for Amazon Simple Storage Service.
//
// Creates a copy of an object that is already stored in Amazon S3.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Simple Storage Service's
// API operation CopyObject for usage and error information.
//
// Returned Error Codes:
//   * ErrCodeObjectNotInActiveTierError "ObjectNotInActiveTierError"
//   The source object of the COPY operation is not in the active tier and is
//   only stored in Amazon Glacier.
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObject
func (c *S3) CopyObject(input *CopyObjectInput) (*CopyObjectOutput, error) {
	req, out := c.CopyObjectRequest(input)
	return out, req.Send()
}

// CopyObjectWithContext is the same as CopyObject with the addition of
// the ability to pass a context and additional request options.
//
// See CopyObject for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil, a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *S3) CopyObjectWithContext(ctx aws.Context, input *CopyObjectInput, opts ...request.Option) (*CopyObjectOutput, error) {
	req, out := c.CopyObjectRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}
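
// A commented usage sketch (assumptions: `svc` is an initialized client and
// the bucket/key names are placeholders). CopySource names the source as
// "bucket/key"; URL-encode the key portion if it contains special characters.
//
//    _, err := svc.CopyObject(&CopyObjectInput{
//        Bucket:     aws.String("destination-bucket"),
//        Key:        aws.String("destination-key"),
//        CopySource: aws.String("source-bucket/source-key"), // "bucket/key" form
//    })
//    if aerr, ok := err.(awserr.Error); ok && aerr.Code() == ErrCodeObjectNotInActiveTierError {
//        // The source object is archived in Glacier; restore it before copying.
//    }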

const opCreateBucket = "CreateBucket"

// CreateBucketRequest generates an "aws/request.Request" representing the
// client's request for the CreateBucket operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// See CreateBucket for usage and error information.
//
// Use this method when you want to inject custom logic into the request's
// lifecycle via a custom handler, or when you need to access properties on the
// request object before or after sending the request. If you just want the
// service response, call the CreateBucket method directly instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
//    // Example sending a request using the CreateBucketRequest method.
//    req, resp := client.CreateBucketRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucket
func (c *S3) CreateBucketRequest(input *CreateBucketInput) (req *request.Request, output *CreateBucketOutput) {
	op := &request.Operation{
		Name:       opCreateBucket,
		HTTPMethod: "PUT",
		HTTPPath:   "/{Bucket}",
	}

	if input == nil {
		input = &CreateBucketInput{}
	}

	output = &CreateBucketOutput{}
	req = c.newRequest(op, input, output)
	return
}

// CreateBucket API operation for Amazon Simple Storage Service.
//
// Creates a new bucket.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Simple Storage Service's
// API operation CreateBucket for usage and error information.
//
// Returned Error Codes:
//   * ErrCodeBucketAlreadyExists "BucketAlreadyExists"
//   The requested bucket name is not available. The bucket namespace is shared
//   by all users of the system. Please select a different name and try again.
//
//   * ErrCodeBucketAlreadyOwnedByYou "BucketAlreadyOwnedByYou"
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucket
func (c *S3) CreateBucket(input *CreateBucketInput) (*CreateBucketOutput, error) {
	req, out := c.CreateBucketRequest(input)
	return out, req.Send()
}

// CreateBucketWithContext is the same as CreateBucket with the addition of
// the ability to pass a context and additional request options.
//
// See CreateBucket for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil, a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *S3) CreateBucketWithContext(ctx aws.Context, input *CreateBucketInput, opts ...request.Option) (*CreateBucketOutput, error) {
	req, out := c.CreateBucketRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}
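
// A commented sketch (names are assumed placeholders). Outside us-east-1 the
// target region must be supplied as a location constraint, and "already owned
// by you" is often safe to treat as success in idempotent setup code.
//
//    _, err := svc.CreateBucket(&CreateBucketInput{
//        Bucket: aws.String("example-bucket"),
//        CreateBucketConfiguration: &CreateBucketConfiguration{
//            LocationConstraint: aws.String("eu-west-1"), // assumed region
//        },
//    })
//    if aerr, ok := err.(awserr.Error); ok && aerr.Code() == ErrCodeBucketAlreadyOwnedByYou {
//        err = nil // bucket already exists in this account; treat as success
//    }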

const opCreateMultipartUpload = "CreateMultipartUpload"

// CreateMultipartUploadRequest generates an "aws/request.Request" representing the
// client's request for the CreateMultipartUpload operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// See CreateMultipartUpload for usage and error information.
//
// Use this method when you want to inject custom logic into the request's
// lifecycle via a custom handler, or when you need to access properties on the
// request object before or after sending the request. If you just want the
// service response, call the CreateMultipartUpload method directly instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
//    // Example sending a request using the CreateMultipartUploadRequest method.
//    req, resp := client.CreateMultipartUploadRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateMultipartUpload
func (c *S3) CreateMultipartUploadRequest(input *CreateMultipartUploadInput) (req *request.Request, output *CreateMultipartUploadOutput) {
	op := &request.Operation{
		Name:       opCreateMultipartUpload,
		HTTPMethod: "POST",
		HTTPPath:   "/{Bucket}/{Key+}?uploads",
	}

	if input == nil {
		input = &CreateMultipartUploadInput{}
	}

	output = &CreateMultipartUploadOutput{}
	req = c.newRequest(op, input, output)
	return
}

// CreateMultipartUpload API operation for Amazon Simple Storage Service.
//
// Initiates a multipart upload and returns an upload ID.
//
// Note: After you initiate a multipart upload and upload one or more parts,
// you must either complete or abort the upload to stop being charged for
// storage of the uploaded parts. Amazon S3 frees the parts' storage, and stops
// charging you for it, only once the upload is completed or aborted.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Simple Storage Service's
// API operation CreateMultipartUpload for usage and error information.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateMultipartUpload
func (c *S3) CreateMultipartUpload(input *CreateMultipartUploadInput) (*CreateMultipartUploadOutput, error) {
	req, out := c.CreateMultipartUploadRequest(input)
	return out, req.Send()
}

// CreateMultipartUploadWithContext is the same as CreateMultipartUpload with the addition of
// the ability to pass a context and additional request options.
//
// See CreateMultipartUpload for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil, a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *S3) CreateMultipartUploadWithContext(ctx aws.Context, input *CreateMultipartUploadInput, opts ...request.Option) (*CreateMultipartUploadOutput, error) {
	req, out := c.CreateMultipartUploadRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}
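
// A commented sketch of the initiate step (placeholders assumed). Keep the
// returned upload ID: every subsequent UploadPart, CompleteMultipartUpload,
// or AbortMultipartUpload call for this upload must present it.
//
//    out, err := svc.CreateMultipartUpload(&CreateMultipartUploadInput{
//        Bucket: aws.String("example-bucket"),
//        Key:    aws.String("example-key"),
//    })
//    if err != nil {
//        return err // hypothetical surrounding function
//    }
//    uploadID := aws.StringValue(out.UploadId) // needed by UploadPart/Complete/Abort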

const opDeleteBucket = "DeleteBucket"

// DeleteBucketRequest generates an "aws/request.Request" representing the
// client's request for the DeleteBucket operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// See DeleteBucket for usage and error information.
//
// Use this method when you want to inject custom logic into the request's
// lifecycle via a custom handler, or when you need to access properties on the
// request object before or after sending the request. If you just want the
// service response, call the DeleteBucket method directly instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
//    // Example sending a request using the DeleteBucketRequest method.
//    req, resp := client.DeleteBucketRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucket
func (c *S3) DeleteBucketRequest(input *DeleteBucketInput) (req *request.Request, output *DeleteBucketOutput) {
	op := &request.Operation{
		Name:       opDeleteBucket,
		HTTPMethod: "DELETE",
		HTTPPath:   "/{Bucket}",
	}

	if input == nil {
		input = &DeleteBucketInput{}
	}

	output = &DeleteBucketOutput{}
	req = c.newRequest(op, input, output)
	req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
	return
}

// DeleteBucket API operation for Amazon Simple Storage Service.
//
// Deletes the bucket. All objects (including all object versions and delete
// markers) in the bucket must be deleted before the bucket itself can be deleted.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Simple Storage Service's
// API operation DeleteBucket for usage and error information.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucket
func (c *S3) DeleteBucket(input *DeleteBucketInput) (*DeleteBucketOutput, error) {
	req, out := c.DeleteBucketRequest(input)
	return out, req.Send()
}

// DeleteBucketWithContext is the same as DeleteBucket with the addition of
// the ability to pass a context and additional request options.
//
// See DeleteBucket for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil, a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *S3) DeleteBucketWithContext(ctx aws.Context, input *DeleteBucketInput, opts ...request.Option) (*DeleteBucketOutput, error) {
	req, out := c.DeleteBucketRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}
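
// A commented sketch of the WithContext variant (placeholders assumed; the
// snippet needs the standard "context" and "time" imports at the call site).
// A standard context.Context satisfies aws.Context, so cancellation and
// deadlines work as usual.
//
//    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
//    defer cancel()
//    _, err := svc.DeleteBucketWithContext(ctx, &DeleteBucketInput{
//        Bucket: aws.String("example-bucket"), // must already be empty
//    })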

const opDeleteBucketAnalyticsConfiguration = "DeleteBucketAnalyticsConfiguration"

// DeleteBucketAnalyticsConfigurationRequest generates an "aws/request.Request" representing the
// client's request for the DeleteBucketAnalyticsConfiguration operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// See DeleteBucketAnalyticsConfiguration for usage and error information.
//
// Use this method when you want to inject custom logic into the request's
// lifecycle via a custom handler, or when you need to access properties on the
// request object before or after sending the request. If you just want the
// service response, call the DeleteBucketAnalyticsConfiguration method directly instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
//    // Example sending a request using the DeleteBucketAnalyticsConfigurationRequest method.
//    req, resp := client.DeleteBucketAnalyticsConfigurationRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketAnalyticsConfiguration
func (c *S3) DeleteBucketAnalyticsConfigurationRequest(input *DeleteBucketAnalyticsConfigurationInput) (req *request.Request, output *DeleteBucketAnalyticsConfigurationOutput) {
	op := &request.Operation{
		Name:       opDeleteBucketAnalyticsConfiguration,
		HTTPMethod: "DELETE",
		HTTPPath:   "/{Bucket}?analytics",
	}

	if input == nil {
		input = &DeleteBucketAnalyticsConfigurationInput{}
	}

	output = &DeleteBucketAnalyticsConfigurationOutput{}
	req = c.newRequest(op, input, output)
	req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
	return
}

// DeleteBucketAnalyticsConfiguration API operation for Amazon Simple Storage Service.
//
// Deletes an analytics configuration for the bucket (specified by the analytics
// configuration ID).
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Simple Storage Service's
// API operation DeleteBucketAnalyticsConfiguration for usage and error information.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketAnalyticsConfiguration
func (c *S3) DeleteBucketAnalyticsConfiguration(input *DeleteBucketAnalyticsConfigurationInput) (*DeleteBucketAnalyticsConfigurationOutput, error) {
	req, out := c.DeleteBucketAnalyticsConfigurationRequest(input)
	return out, req.Send()
}

// DeleteBucketAnalyticsConfigurationWithContext is the same as DeleteBucketAnalyticsConfiguration with the addition of
// the ability to pass a context and additional request options.
//
// See DeleteBucketAnalyticsConfiguration for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil, a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *S3) DeleteBucketAnalyticsConfigurationWithContext(ctx aws.Context, input *DeleteBucketAnalyticsConfigurationInput, opts ...request.Option) (*DeleteBucketAnalyticsConfigurationOutput, error) {
	req, out := c.DeleteBucketAnalyticsConfigurationRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}
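
// A commented sketch of the "Request + custom handler" pattern the doc
// comments above describe (placeholders assumed). A handler pushed onto the
// Build phase runs before signing, so injected headers are covered by the
// request signature.
//
//    req, _ := svc.DeleteBucketAnalyticsConfigurationRequest(&DeleteBucketAnalyticsConfigurationInput{
//        Bucket: aws.String("example-bucket"),
//        Id:     aws.String("example-analytics-id"), // assumed configuration ID
//    })
//    req.Handlers.Build.PushBack(func(r *request.Request) {
//        r.HTTPRequest.Header.Set("X-Example-Trace", "hypothetical") // assumed custom header
//    })
//    err := req.Send()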

const opDeleteBucketCors = "DeleteBucketCors"

// DeleteBucketCorsRequest generates an "aws/request.Request" representing the
// client's request for the DeleteBucketCors operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// See DeleteBucketCors for usage and error information.
//
// Use this method when you want to inject custom logic into the request's
// lifecycle via a custom handler, or when you need to access properties on the
// request object before or after sending the request. If you just want the
// service response, call the DeleteBucketCors method directly instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
//    // Example sending a request using the DeleteBucketCorsRequest method.
//    req, resp := client.DeleteBucketCorsRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketCors
func (c *S3) DeleteBucketCorsRequest(input *DeleteBucketCorsInput) (req *request.Request, output *DeleteBucketCorsOutput) {
	op := &request.Operation{
		Name:       opDeleteBucketCors,
		HTTPMethod: "DELETE",
		HTTPPath:   "/{Bucket}?cors",
	}

	if input == nil {
		input = &DeleteBucketCorsInput{}
	}

	output = &DeleteBucketCorsOutput{}
	req = c.newRequest(op, input, output)
	req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
	return
}

// DeleteBucketCors API operation for Amazon Simple Storage Service.
//
// Deletes the CORS configuration information set for the bucket.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Simple Storage Service's
// API operation DeleteBucketCors for usage and error information.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketCors
func (c *S3) DeleteBucketCors(input *DeleteBucketCorsInput) (*DeleteBucketCorsOutput, error) {
	req, out := c.DeleteBucketCorsRequest(input)
	return out, req.Send()
}

// DeleteBucketCorsWithContext is the same as DeleteBucketCors with the addition of
// the ability to pass a context and additional request options.
//
// See DeleteBucketCors for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil, a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *S3) DeleteBucketCorsWithContext(ctx aws.Context, input *DeleteBucketCorsInput, opts ...request.Option) (*DeleteBucketCorsOutput, error) {
	req, out := c.DeleteBucketCorsRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}

const opDeleteBucketInventoryConfiguration = "DeleteBucketInventoryConfiguration"

// DeleteBucketInventoryConfigurationRequest generates an "aws/request.Request" representing the
// client's request for the DeleteBucketInventoryConfiguration operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// See DeleteBucketInventoryConfiguration for usage and error information.
//
// Use this method when you want to inject custom logic into the request's
// lifecycle via a custom handler, or when you need to access properties on the
// request object before or after sending the request. If you just want the
// service response, call the DeleteBucketInventoryConfiguration method directly instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
//    // Example sending a request using the DeleteBucketInventoryConfigurationRequest method.
//    req, resp := client.DeleteBucketInventoryConfigurationRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketInventoryConfiguration
func (c *S3) DeleteBucketInventoryConfigurationRequest(input *DeleteBucketInventoryConfigurationInput) (req *request.Request, output *DeleteBucketInventoryConfigurationOutput) {
	op := &request.Operation{
		Name:       opDeleteBucketInventoryConfiguration,
		HTTPMethod: "DELETE",
		HTTPPath:   "/{Bucket}?inventory",
	}

	if input == nil {
		input = &DeleteBucketInventoryConfigurationInput{}
	}

	output = &DeleteBucketInventoryConfigurationOutput{}
	req = c.newRequest(op, input, output)
	req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
	return
}

// DeleteBucketInventoryConfiguration API operation for Amazon Simple Storage Service.
//
// Deletes an inventory configuration (identified by the inventory ID) from
// the bucket.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Simple Storage Service's
// API operation DeleteBucketInventoryConfiguration for usage and error information.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketInventoryConfiguration
func (c *S3) DeleteBucketInventoryConfiguration(input *DeleteBucketInventoryConfigurationInput) (*DeleteBucketInventoryConfigurationOutput, error) {
	req, out := c.DeleteBucketInventoryConfigurationRequest(input)
	return out, req.Send()
}

// DeleteBucketInventoryConfigurationWithContext is the same as DeleteBucketInventoryConfiguration with the addition of
// the ability to pass a context and additional request options.
//
// See DeleteBucketInventoryConfiguration for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil, a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *S3) DeleteBucketInventoryConfigurationWithContext(ctx aws.Context, input *DeleteBucketInventoryConfigurationInput, opts ...request.Option) (*DeleteBucketInventoryConfigurationOutput, error) {
	req, out := c.DeleteBucketInventoryConfigurationRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}

const opDeleteBucketLifecycle = "DeleteBucketLifecycle"

// DeleteBucketLifecycleRequest generates an "aws/request.Request" representing the
// client's request for the DeleteBucketLifecycle operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// See DeleteBucketLifecycle for usage and error information.
//
// Use this method when you want to inject custom logic into the request's
// lifecycle via a custom handler, or when you need to access properties on the
// request object before or after sending the request. If you just want the
// service response, call the DeleteBucketLifecycle method directly instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
//    // Example sending a request using the DeleteBucketLifecycleRequest method.
//    req, resp := client.DeleteBucketLifecycleRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketLifecycle
func (c *S3) DeleteBucketLifecycleRequest(input *DeleteBucketLifecycleInput) (req *request.Request, output *DeleteBucketLifecycleOutput) {
	op := &request.Operation{
		Name:       opDeleteBucketLifecycle,
		HTTPMethod: "DELETE",
		HTTPPath:   "/{Bucket}?lifecycle",
	}

	if input == nil {
		input = &DeleteBucketLifecycleInput{}
	}

	output = &DeleteBucketLifecycleOutput{}
	req = c.newRequest(op, input, output)
	req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
	return
}

// DeleteBucketLifecycle API operation for Amazon Simple Storage Service.
//
// Deletes the lifecycle configuration from the bucket.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Simple Storage Service's
// API operation DeleteBucketLifecycle for usage and error information.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketLifecycle
func (c *S3) DeleteBucketLifecycle(input *DeleteBucketLifecycleInput) (*DeleteBucketLifecycleOutput, error) {
	req, out := c.DeleteBucketLifecycleRequest(input)
	return out, req.Send()
}

// DeleteBucketLifecycleWithContext is the same as DeleteBucketLifecycle with the addition of
// the ability to pass a context and additional request options.
//
// See DeleteBucketLifecycle for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil, a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *S3) DeleteBucketLifecycleWithContext(ctx aws.Context, input *DeleteBucketLifecycleInput, opts ...request.Option) (*DeleteBucketLifecycleOutput, error) {
	req, out := c.DeleteBucketLifecycleRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}

const opDeleteBucketMetricsConfiguration = "DeleteBucketMetricsConfiguration"

// DeleteBucketMetricsConfigurationRequest generates an "aws/request.Request" representing the
// client's request for the DeleteBucketMetricsConfiguration operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// See DeleteBucketMetricsConfiguration for usage and error information.
//
// Use this method when you want to inject custom logic into the request's
// lifecycle via a custom handler, or when you need to access properties on the
// request object before or after sending the request. If you just want the
// service response, call the DeleteBucketMetricsConfiguration method directly instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
//    // Example sending a request using the DeleteBucketMetricsConfigurationRequest method.
//    req, resp := client.DeleteBucketMetricsConfigurationRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketMetricsConfiguration
func (c *S3) DeleteBucketMetricsConfigurationRequest(input *DeleteBucketMetricsConfigurationInput) (req *request.Request, output *DeleteBucketMetricsConfigurationOutput) {
	op := &request.Operation{
		Name:       opDeleteBucketMetricsConfiguration,
		HTTPMethod: "DELETE",
		HTTPPath:   "/{Bucket}?metrics",
	}

	if input == nil {
		input = &DeleteBucketMetricsConfigurationInput{}
	}

	output = &DeleteBucketMetricsConfigurationOutput{}
	req = c.newRequest(op, input, output)
	req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
	return
}

// DeleteBucketMetricsConfiguration API operation for Amazon Simple Storage Service.
//
// Deletes a metrics configuration (specified by the metrics configuration ID)
// from the bucket.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Simple Storage Service's
// API operation DeleteBucketMetricsConfiguration for usage and error information.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketMetricsConfiguration
func (c *S3) DeleteBucketMetricsConfiguration(input *DeleteBucketMetricsConfigurationInput) (*DeleteBucketMetricsConfigurationOutput, error) {
	req, out := c.DeleteBucketMetricsConfigurationRequest(input)
	return out, req.Send()
}

// DeleteBucketMetricsConfigurationWithContext is the same as DeleteBucketMetricsConfiguration with the addition of
// the ability to pass a context and additional request options.
//
// See DeleteBucketMetricsConfiguration for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil, a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *S3) DeleteBucketMetricsConfigurationWithContext(ctx aws.Context, input *DeleteBucketMetricsConfigurationInput, opts ...request.Option) (*DeleteBucketMetricsConfigurationOutput, error) {
	req, out := c.DeleteBucketMetricsConfigurationRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}

const opDeleteBucketPolicy = "DeleteBucketPolicy"

// DeleteBucketPolicyRequest generates an "aws/request.Request" representing the
// client's request for the DeleteBucketPolicy operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// See DeleteBucketPolicy for usage and error information.
//
// Use this method when you want to inject custom logic into the request's
// lifecycle via a custom handler, or when you need to access properties on the
// request object before or after sending the request. If you just want the
// service response, call the DeleteBucketPolicy method directly instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
//    // Example sending a request using the DeleteBucketPolicyRequest method.
//    req, resp := client.DeleteBucketPolicyRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketPolicy
func (c *S3) DeleteBucketPolicyRequest(input *DeleteBucketPolicyInput) (req *request.Request, output *DeleteBucketPolicyOutput) {
	op := &request.Operation{
		Name:       opDeleteBucketPolicy,
		HTTPMethod: "DELETE",
		HTTPPath:   "/{Bucket}?policy",
	}

	if input == nil {
		input = &DeleteBucketPolicyInput{}
	}

	output = &DeleteBucketPolicyOutput{}
	req = c.newRequest(op, input, output)
	req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
	return
}

// DeleteBucketPolicy API operation for Amazon Simple Storage Service.
//
// Deletes the policy from the bucket.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Simple Storage Service's
// API operation DeleteBucketPolicy for usage and error information.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketPolicy
func (c *S3) DeleteBucketPolicy(input *DeleteBucketPolicyInput) (*DeleteBucketPolicyOutput, error) {
	req, out := c.DeleteBucketPolicyRequest(input)
	return out, req.Send()
}

// DeleteBucketPolicyWithContext is the same as DeleteBucketPolicy with the addition of
// the ability to pass a context and additional request options.
//
// See DeleteBucketPolicy for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil, a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *S3) DeleteBucketPolicyWithContext(ctx aws.Context, input *DeleteBucketPolicyInput, opts ...request.Option) (*DeleteBucketPolicyOutput, error) {
	req, out := c.DeleteBucketPolicyRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}

const opDeleteBucketReplication = "DeleteBucketReplication"

// DeleteBucketReplicationRequest generates an "aws/request.Request" representing the
// client's request for the DeleteBucketReplication operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// See DeleteBucketReplication for usage and error information.
//
// Use this method when you want to inject custom logic into the request's
// lifecycle via a custom handler, or when you need to access properties on the
// request object before or after sending the request. If you just want the
// service response, call the DeleteBucketReplication method directly instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
//    // Example sending a request using the DeleteBucketReplicationRequest method.
//    req, resp := client.DeleteBucketReplicationRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketReplication
func (c *S3) DeleteBucketReplicationRequest(input *DeleteBucketReplicationInput) (req *request.Request, output *DeleteBucketReplicationOutput) {
	op := &request.Operation{
		Name:       opDeleteBucketReplication,
		HTTPMethod: "DELETE",
		HTTPPath:   "/{Bucket}?replication",
	}

	if input == nil {
		input = &DeleteBucketReplicationInput{}
	}

	output = &DeleteBucketReplicationOutput{}
	req = c.newRequest(op, input, output)
	req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
	return
}

// DeleteBucketReplication API operation for Amazon Simple Storage Service.
//
// Deletes the replication configuration from the bucket.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Simple Storage Service's
// API operation DeleteBucketReplication for usage and error information.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketReplication
func (c *S3) DeleteBucketReplication(input *DeleteBucketReplicationInput) (*DeleteBucketReplicationOutput, error) {
	req, out := c.DeleteBucketReplicationRequest(input)
	return out, req.Send()
}

// DeleteBucketReplicationWithContext is the same as DeleteBucketReplication with the addition of
// the ability to pass a context and additional request options.
//
// See DeleteBucketReplication for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil, a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *S3) DeleteBucketReplicationWithContext(ctx aws.Context, input *DeleteBucketReplicationInput, opts ...request.Option) (*DeleteBucketReplicationOutput, error) {
	req, out := c.DeleteBucketReplicationRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}

const opDeleteBucketTagging = "DeleteBucketTagging"

// DeleteBucketTaggingRequest generates an "aws/request.Request" representing the
// client's request for the DeleteBucketTagging operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// See DeleteBucketTagging for usage and error information.
//
// Use this method when you want to inject custom logic into the request's
// lifecycle via a custom handler, or when you need to access properties on the
// request object before or after sending the request. If you just want the
// service response, call the DeleteBucketTagging method directly instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
//    // Example sending a request using the DeleteBucketTaggingRequest method.
//    req, resp := client.DeleteBucketTaggingRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketTagging
func (c *S3) DeleteBucketTaggingRequest(input *DeleteBucketTaggingInput) (req *request.Request, output *DeleteBucketTaggingOutput) {
	op := &request.Operation{
		Name:       opDeleteBucketTagging,
		HTTPMethod: "DELETE",
		HTTPPath:   "/{Bucket}?tagging",
	}

	if input == nil {
		input = &DeleteBucketTaggingInput{}
	}

	output = &DeleteBucketTaggingOutput{}
	req = c.newRequest(op, input, output)
	req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
	return
}

// DeleteBucketTagging API operation for Amazon Simple Storage Service.
//
// Deletes the tags from the bucket.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Simple Storage Service's
// API operation DeleteBucketTagging for usage and error information.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketTagging
func (c *S3) DeleteBucketTagging(input *DeleteBucketTaggingInput) (*DeleteBucketTaggingOutput, error) {
	req, out := c.DeleteBucketTaggingRequest(input)
	return out, req.Send()
}

// DeleteBucketTaggingWithContext is the same as DeleteBucketTagging with the addition of
// the ability to pass a context and additional request options.
//
// See DeleteBucketTagging for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil, a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *S3) DeleteBucketTaggingWithContext(ctx aws.Context, input *DeleteBucketTaggingInput, opts ...request.Option) (*DeleteBucketTaggingOutput, error) {
	req, out := c.DeleteBucketTaggingRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}

const opDeleteBucketWebsite = "DeleteBucketWebsite"

// DeleteBucketWebsiteRequest generates an "aws/request.Request" representing the
// client's request for the DeleteBucketWebsite operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// See DeleteBucketWebsite for usage and error information.
//
// Use this method when you want to inject custom logic into the request's
// lifecycle via a custom handler, or when you need to access properties on the
// request object before or after sending the request. If you just want the
// service response, call the DeleteBucketWebsite method directly instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
//    // Example sending a request using the DeleteBucketWebsiteRequest method.
//    req, resp := client.DeleteBucketWebsiteRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketWebsite
func (c *S3) DeleteBucketWebsiteRequest(input *DeleteBucketWebsiteInput) (req *request.Request, output *DeleteBucketWebsiteOutput) {
	op := &request.Operation{
		Name:       opDeleteBucketWebsite,
		HTTPMethod: "DELETE",
		HTTPPath:   "/{Bucket}?website",
	}

	if input == nil {
		input = &DeleteBucketWebsiteInput{}
	}

	output = &DeleteBucketWebsiteOutput{}
	req = c.newRequest(op, input, output)
	req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
	return
}

// DeleteBucketWebsite API operation for Amazon Simple Storage Service.
//
// This operation removes the website configuration from the bucket.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Simple Storage Service's
// API operation DeleteBucketWebsite for usage and error information.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketWebsite
func (c *S3) DeleteBucketWebsite(input *DeleteBucketWebsiteInput) (*DeleteBucketWebsiteOutput, error) {
	req, out := c.DeleteBucketWebsiteRequest(input)
	return out, req.Send()
}

// DeleteBucketWebsiteWithContext is the same as DeleteBucketWebsite with the addition of
// the ability to pass a context and additional request options.
//
// See DeleteBucketWebsite for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil, a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *S3) DeleteBucketWebsiteWithContext(ctx aws.Context, input *DeleteBucketWebsiteInput, opts ...request.Option) (*DeleteBucketWebsiteOutput, error) {
	req, out := c.DeleteBucketWebsiteRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}

const opDeleteObject = "DeleteObject"

// DeleteObjectRequest generates an "aws/request.Request" representing the
// client's request for the DeleteObject operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// See DeleteObject for usage and error information.
//
// Use this method when you want to inject custom logic into the request's
// lifecycle via a custom handler, or when you need to access properties on the
// request object before or after sending the request. If you just want the
// service response, call the DeleteObject method directly instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
//    // Example sending a request using the DeleteObjectRequest method.
//    req, resp := client.DeleteObjectRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObject
func (c *S3) DeleteObjectRequest(input *DeleteObjectInput) (req *request.Request, output *DeleteObjectOutput) {
	op := &request.Operation{
		Name:       opDeleteObject,
		HTTPMethod: "DELETE",
		HTTPPath:   "/{Bucket}/{Key+}",
	}

	if input == nil {
		input = &DeleteObjectInput{}
	}

	output = &DeleteObjectOutput{}
	req = c.newRequest(op, input, output)
	return
}

// DeleteObject API operation for Amazon Simple Storage Service.
//
// Removes the null version (if there is one) of an object and inserts a delete
// marker, which becomes the latest version of the object. If there isn't a
// null version, Amazon S3 does not remove any objects.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Simple Storage Service's
// API operation DeleteObject for usage and error information.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObject
func (c *S3) DeleteObject(input *DeleteObjectInput) (*DeleteObjectOutput, error) {
	req, out := c.DeleteObjectRequest(input)
	return out, req.Send()
}

// DeleteObjectWithContext is the same as DeleteObject with the addition of
// the ability to pass a context and additional request options.
//
// See DeleteObject for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil, a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *S3) DeleteObjectWithContext(ctx aws.Context, input *DeleteObjectInput, opts ...request.Option) (*DeleteObjectOutput, error) {
	req, out := c.DeleteObjectRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}
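
// A commented sketch (placeholders assumed). On a versioned bucket a plain
// DeleteObject inserts a delete marker rather than removing data, which the
// response reports.
//
//    out, err := svc.DeleteObject(&DeleteObjectInput{
//        Bucket: aws.String("example-bucket"),
//        Key:    aws.String("example-key"),
//    })
//    if err == nil && aws.BoolValue(out.DeleteMarker) {
//        // A delete marker is now the latest version of the object.
//    }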

const opDeleteObjectTagging = "DeleteObjectTagging"

// DeleteObjectTaggingRequest generates an "aws/request.Request" representing the
// client's request for the DeleteObjectTagging operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// See DeleteObjectTagging for usage and error information.
//
// Use this method when you want to inject custom logic into the request's
// lifecycle via a custom handler, or when you need to access properties on the
// request object before or after sending the request. If you just want the
// service response, call the DeleteObjectTagging method directly instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
//    // Example sending a request using the DeleteObjectTaggingRequest method.
//    req, resp := client.DeleteObjectTaggingRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectTagging
func (c *S3) DeleteObjectTaggingRequest(input *DeleteObjectTaggingInput) (req *request.Request, output *DeleteObjectTaggingOutput) {
	op := &request.Operation{
		Name:       opDeleteObjectTagging,
		HTTPMethod: "DELETE",
		HTTPPath:   "/{Bucket}/{Key+}?tagging",
	}

	if input == nil {
		input = &DeleteObjectTaggingInput{}
	}

	output = &DeleteObjectTaggingOutput{}
	req = c.newRequest(op, input, output)
	return
}

// DeleteObjectTagging API operation for Amazon Simple Storage Service.
//
// Removes the tag-set from an existing object.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Simple Storage Service's
// API operation DeleteObjectTagging for usage and error information.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectTagging
func (c *S3) DeleteObjectTagging(input *DeleteObjectTaggingInput) (*DeleteObjectTaggingOutput, error) {
	req, out := c.DeleteObjectTaggingRequest(input)
	return out, req.Send()
}

// DeleteObjectTaggingWithContext is the same as DeleteObjectTagging with the addition of
// the ability to pass a context and additional request options.
//
// See DeleteObjectTagging for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil, a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *S3) DeleteObjectTaggingWithContext(ctx aws.Context, input *DeleteObjectTaggingInput, opts ...request.Option) (*DeleteObjectTaggingOutput, error) {
	req, out := c.DeleteObjectTaggingRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}
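
// A commented sketch (placeholders assumed). VersionId is optional; when set,
// only that version's tag set is removed.
//
//    out, err := svc.DeleteObjectTagging(&DeleteObjectTaggingInput{
//        Bucket:    aws.String("example-bucket"),
//        Key:       aws.String("example-key"),
//        VersionId: aws.String("example-version"), // omit to target the latest version
//    })
//    _ = out // the output echoes the affected VersionId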

const opDeleteObjects = "DeleteObjects"

// DeleteObjectsRequest generates a "aws/request.Request" representing the
// client's request for the DeleteObjects operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// See DeleteObjects for usage and error information.
//
// Use this method when you want to inject custom logic into the request's
// lifecycle with a custom handler, or when you need to access properties on
// the request object before or after sending the request. If you just want
// the service response, call the DeleteObjects method directly instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
//    // Example sending a request using the DeleteObjectsRequest method.
//    req, resp := client.DeleteObjectsRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjects
func (c *S3) DeleteObjectsRequest(input *DeleteObjectsInput) (req *request.Request, output *DeleteObjectsOutput) {
	op := &request.Operation{
		Name:       opDeleteObjects,
		HTTPMethod: "POST",
		HTTPPath:   "/{Bucket}?delete",
	}

	if input == nil {
		input = &DeleteObjectsInput{}
	}

	output = &DeleteObjectsOutput{}
	req = c.newRequest(op, input, output)
	return
}

// DeleteObjects API operation for Amazon Simple Storage Service.
//
// This operation enables you to delete multiple objects from a bucket using
// a single HTTP request. You may specify up to 1000 keys.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Simple Storage Service's
// API operation DeleteObjects for usage and error information.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjects
func (c *S3) DeleteObjects(input *DeleteObjectsInput) (*DeleteObjectsOutput, error) {
	req, out := c.DeleteObjectsRequest(input)
	return out, req.Send()
}

// DeleteObjectsWithContext is the same as DeleteObjects with the addition of
// the ability to pass a context and additional request options.
//
// See DeleteObjects for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil, a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *S3) DeleteObjectsWithContext(ctx aws.Context, input *DeleteObjectsInput, opts ...request.Option) (*DeleteObjectsOutput, error) {
	req, out := c.DeleteObjectsRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}
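
// Editorial sketch, not generated SDK code: deleting several objects in one
// request. A single DeleteObjectsInput may carry up to 1000 keys; svc and the
// names below are placeholders.
//
//    out, err := svc.DeleteObjects(&s3.DeleteObjectsInput{
//        Bucket: aws.String("example-bucket"),
//        Delete: &s3.Delete{
//            Objects: []*s3.ObjectIdentifier{
//                {Key: aws.String("a.txt")},
//                {Key: aws.String("b.txt")},
//            },
//            Quiet: aws.Bool(true), // report only failed deletions
//        },
//    })
//    if err == nil {
//        fmt.Println(len(out.Errors), "keys failed to delete")
//    }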

const opGetBucketAccelerateConfiguration = "GetBucketAccelerateConfiguration"

// GetBucketAccelerateConfigurationRequest generates a "aws/request.Request" representing the
// client's request for the GetBucketAccelerateConfiguration operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// See GetBucketAccelerateConfiguration for usage and error information.
//
// Use this method when you want to inject custom logic into the request's
// lifecycle with a custom handler, or when you need to access properties on
// the request object before or after sending the request. If you just want
// the service response, call the GetBucketAccelerateConfiguration method directly instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
//    // Example sending a request using the GetBucketAccelerateConfigurationRequest method.
//    req, resp := client.GetBucketAccelerateConfigurationRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAccelerateConfiguration
func (c *S3) GetBucketAccelerateConfigurationRequest(input *GetBucketAccelerateConfigurationInput) (req *request.Request, output *GetBucketAccelerateConfigurationOutput) {
	op := &request.Operation{
		Name:       opGetBucketAccelerateConfiguration,
		HTTPMethod: "GET",
		HTTPPath:   "/{Bucket}?accelerate",
	}

	if input == nil {
		input = &GetBucketAccelerateConfigurationInput{}
	}

	output = &GetBucketAccelerateConfigurationOutput{}
	req = c.newRequest(op, input, output)
	return
}

// GetBucketAccelerateConfiguration API operation for Amazon Simple Storage Service.
//
// Returns the accelerate configuration of a bucket.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Simple Storage Service's
// API operation GetBucketAccelerateConfiguration for usage and error information.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAccelerateConfiguration
func (c *S3) GetBucketAccelerateConfiguration(input *GetBucketAccelerateConfigurationInput) (*GetBucketAccelerateConfigurationOutput, error) {
	req, out := c.GetBucketAccelerateConfigurationRequest(input)
	return out, req.Send()
}

// GetBucketAccelerateConfigurationWithContext is the same as GetBucketAccelerateConfiguration with the addition of
// the ability to pass a context and additional request options.
//
// See GetBucketAccelerateConfiguration for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil, a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *S3) GetBucketAccelerateConfigurationWithContext(ctx aws.Context, input *GetBucketAccelerateConfigurationInput, opts ...request.Option) (*GetBucketAccelerateConfigurationOutput, error) {
	req, out := c.GetBucketAccelerateConfigurationRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}
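
// Editorial sketch, not generated SDK code: reading the transfer-acceleration
// status of a bucket. svc is an assumed *S3 client; the bucket name is a
// placeholder.
//
//    out, err := svc.GetBucketAccelerateConfiguration(&s3.GetBucketAccelerateConfigurationInput{
//        Bucket: aws.String("example-bucket"),
//    })
//    if err == nil {
//        // "Enabled", "Suspended", or "" when never configured
//        fmt.Println("accelerate status:", aws.StringValue(out.Status))
//    }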

const opGetBucketAcl = "GetBucketAcl"

// GetBucketAclRequest generates a "aws/request.Request" representing the
// client's request for the GetBucketAcl operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// See GetBucketAcl for usage and error information.
//
// Use this method when you want to inject custom logic into the request's
// lifecycle with a custom handler, or when you need to access properties on
// the request object before or after sending the request. If you just want
// the service response, call the GetBucketAcl method directly instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
//    // Example sending a request using the GetBucketAclRequest method.
//    req, resp := client.GetBucketAclRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAcl
func (c *S3) GetBucketAclRequest(input *GetBucketAclInput) (req *request.Request, output *GetBucketAclOutput) {
	op := &request.Operation{
		Name:       opGetBucketAcl,
		HTTPMethod: "GET",
		HTTPPath:   "/{Bucket}?acl",
	}

	if input == nil {
		input = &GetBucketAclInput{}
	}

	output = &GetBucketAclOutput{}
	req = c.newRequest(op, input, output)
	return
}

// GetBucketAcl API operation for Amazon Simple Storage Service.
//
// Gets the access control policy for the bucket.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Simple Storage Service's
// API operation GetBucketAcl for usage and error information.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAcl
func (c *S3) GetBucketAcl(input *GetBucketAclInput) (*GetBucketAclOutput, error) {
	req, out := c.GetBucketAclRequest(input)
	return out, req.Send()
}

// GetBucketAclWithContext is the same as GetBucketAcl with the addition of
// the ability to pass a context and additional request options.
//
// See GetBucketAcl for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil, a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *S3) GetBucketAclWithContext(ctx aws.Context, input *GetBucketAclInput, opts ...request.Option) (*GetBucketAclOutput, error) {
	req, out := c.GetBucketAclRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}
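
// Editorial sketch, not generated SDK code: listing the grants on a bucket's
// ACL. svc is an assumed *S3 client; the bucket name is a placeholder.
//
//    out, err := svc.GetBucketAcl(&s3.GetBucketAclInput{
//        Bucket: aws.String("example-bucket"),
//    })
//    if err == nil {
//        for _, g := range out.Grants {
//            fmt.Println(aws.StringValue(g.Permission), "granted to",
//                aws.StringValue(g.Grantee.Type))
//        }
//    }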

const opGetBucketAnalyticsConfiguration = "GetBucketAnalyticsConfiguration"

// GetBucketAnalyticsConfigurationRequest generates a "aws/request.Request" representing the
// client's request for the GetBucketAnalyticsConfiguration operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// See GetBucketAnalyticsConfiguration for usage and error information.
//
// Use this method when you want to inject custom logic into the request's
// lifecycle with a custom handler, or when you need to access properties on
// the request object before or after sending the request. If you just want
// the service response, call the GetBucketAnalyticsConfiguration method directly instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
//    // Example sending a request using the GetBucketAnalyticsConfigurationRequest method.
//    req, resp := client.GetBucketAnalyticsConfigurationRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAnalyticsConfiguration
func (c *S3) GetBucketAnalyticsConfigurationRequest(input *GetBucketAnalyticsConfigurationInput) (req *request.Request, output *GetBucketAnalyticsConfigurationOutput) {
	op := &request.Operation{
		Name:       opGetBucketAnalyticsConfiguration,
		HTTPMethod: "GET",
		HTTPPath:   "/{Bucket}?analytics",
	}

	if input == nil {
		input = &GetBucketAnalyticsConfigurationInput{}
	}

	output = &GetBucketAnalyticsConfigurationOutput{}
	req = c.newRequest(op, input, output)
	return
}

// GetBucketAnalyticsConfiguration API operation for Amazon Simple Storage Service.
//
// Gets an analytics configuration for the bucket (specified by the analytics
// configuration ID).
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Simple Storage Service's
// API operation GetBucketAnalyticsConfiguration for usage and error information.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAnalyticsConfiguration
func (c *S3) GetBucketAnalyticsConfiguration(input *GetBucketAnalyticsConfigurationInput) (*GetBucketAnalyticsConfigurationOutput, error) {
	req, out := c.GetBucketAnalyticsConfigurationRequest(input)
	return out, req.Send()
}

// GetBucketAnalyticsConfigurationWithContext is the same as GetBucketAnalyticsConfiguration with the addition of
// the ability to pass a context and additional request options.
//
// See GetBucketAnalyticsConfiguration for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil, a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *S3) GetBucketAnalyticsConfigurationWithContext(ctx aws.Context, input *GetBucketAnalyticsConfigurationInput, opts ...request.Option) (*GetBucketAnalyticsConfigurationOutput, error) {
	req, out := c.GetBucketAnalyticsConfigurationRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}
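
// Editorial sketch, not generated SDK code: fetching an analytics configuration
// by its ID and inspecting a failure with the runtime type assertion the doc
// comments above describe. svc and both identifiers are placeholders.
//
//    _, err := svc.GetBucketAnalyticsConfiguration(&s3.GetBucketAnalyticsConfigurationInput{
//        Bucket: aws.String("example-bucket"),
//        Id:     aws.String("report-1"),
//    })
//    if awsErr, ok := err.(awserr.Error); ok {
//        fmt.Println(awsErr.Code(), awsErr.Message())
//    }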

const opGetBucketCors = "GetBucketCors"

// GetBucketCorsRequest generates a "aws/request.Request" representing the
// client's request for the GetBucketCors operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// See GetBucketCors for usage and error information.
//
// Use this method when you want to inject custom logic into the request's
// lifecycle with a custom handler, or when you need to access properties on
// the request object before or after sending the request. If you just want
// the service response, call the GetBucketCors method directly instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
//    // Example sending a request using the GetBucketCorsRequest method.
//    req, resp := client.GetBucketCorsRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketCors
func (c *S3) GetBucketCorsRequest(input *GetBucketCorsInput) (req *request.Request, output *GetBucketCorsOutput) {
	op := &request.Operation{
		Name:       opGetBucketCors,
		HTTPMethod: "GET",
		HTTPPath:   "/{Bucket}?cors",
	}

	if input == nil {
		input = &GetBucketCorsInput{}
	}

	output = &GetBucketCorsOutput{}
	req = c.newRequest(op, input, output)
	return
}

// GetBucketCors API operation for Amazon Simple Storage Service.
//
// Returns the CORS configuration for the bucket.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Simple Storage Service's
// API operation GetBucketCors for usage and error information.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketCors
func (c *S3) GetBucketCors(input *GetBucketCorsInput) (*GetBucketCorsOutput, error) {
	req, out := c.GetBucketCorsRequest(input)
	return out, req.Send()
}

// GetBucketCorsWithContext is the same as GetBucketCors with the addition of
// the ability to pass a context and additional request options.
//
// See GetBucketCors for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil, a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *S3) GetBucketCorsWithContext(ctx aws.Context, input *GetBucketCorsInput, opts ...request.Option) (*GetBucketCorsOutput, error) {
	req, out := c.GetBucketCorsRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}
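
// Editorial sketch, not generated SDK code: printing the CORS rules configured
// on a bucket. svc is an assumed *S3 client; the bucket name is a placeholder.
//
//    out, err := svc.GetBucketCors(&s3.GetBucketCorsInput{
//        Bucket: aws.String("example-bucket"),
//    })
//    if err == nil {
//        for _, rule := range out.CORSRules {
//            fmt.Println(aws.StringValueSlice(rule.AllowedMethods),
//                aws.StringValueSlice(rule.AllowedOrigins))
//        }
//    }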

const opGetBucketInventoryConfiguration = "GetBucketInventoryConfiguration"

// GetBucketInventoryConfigurationRequest generates a "aws/request.Request" representing the
// client's request for the GetBucketInventoryConfiguration operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// See GetBucketInventoryConfiguration for usage and error information.
//
// Use this method when you want to inject custom logic into the request's
// lifecycle with a custom handler, or when you need to access properties on
// the request object before or after sending the request. If you just want
// the service response, call the GetBucketInventoryConfiguration method directly instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
//    // Example sending a request using the GetBucketInventoryConfigurationRequest method.
//    req, resp := client.GetBucketInventoryConfigurationRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketInventoryConfiguration
func (c *S3) GetBucketInventoryConfigurationRequest(input *GetBucketInventoryConfigurationInput) (req *request.Request, output *GetBucketInventoryConfigurationOutput) {
	op := &request.Operation{
		Name:       opGetBucketInventoryConfiguration,
		HTTPMethod: "GET",
		HTTPPath:   "/{Bucket}?inventory",
	}

	if input == nil {
		input = &GetBucketInventoryConfigurationInput{}
	}

	output = &GetBucketInventoryConfigurationOutput{}
	req = c.newRequest(op, input, output)
	return
}

// GetBucketInventoryConfiguration API operation for Amazon Simple Storage Service.
//
// Returns an inventory configuration (identified by the inventory ID) from
// the bucket.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Simple Storage Service's
// API operation GetBucketInventoryConfiguration for usage and error information.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketInventoryConfiguration
func (c *S3) GetBucketInventoryConfiguration(input *GetBucketInventoryConfigurationInput) (*GetBucketInventoryConfigurationOutput, error) {
	req, out := c.GetBucketInventoryConfigurationRequest(input)
	return out, req.Send()
}

// GetBucketInventoryConfigurationWithContext is the same as GetBucketInventoryConfiguration with the addition of
// the ability to pass a context and additional request options.
//
// See GetBucketInventoryConfiguration for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil, a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *S3) GetBucketInventoryConfigurationWithContext(ctx aws.Context, input *GetBucketInventoryConfigurationInput, opts ...request.Option) (*GetBucketInventoryConfigurationOutput, error) {
	req, out := c.GetBucketInventoryConfigurationRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}
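
// Editorial sketch, not generated SDK code: passing extra request options
// through the WithContext variant, here enabling wire-level logging for a
// single call. svc, ctx, and both identifiers are assumed placeholders.
//
//    out, err := svc.GetBucketInventoryConfigurationWithContext(ctx,
//        &s3.GetBucketInventoryConfigurationInput{
//            Bucket: aws.String("example-bucket"),
//            Id:     aws.String("weekly-inventory"),
//        },
//        request.WithLogLevel(aws.LogDebugWithHTTPBody),
//    )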

const opGetBucketLifecycle = "GetBucketLifecycle"

// GetBucketLifecycleRequest generates a "aws/request.Request" representing the
// client's request for the GetBucketLifecycle operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// See GetBucketLifecycle for usage and error information.
//
// Use this method when you want to inject custom logic into the request's
// lifecycle with a custom handler, or when you need to access properties on
// the request object before or after sending the request. If you just want
// the service response, call the GetBucketLifecycle method directly instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
//    // Example sending a request using the GetBucketLifecycleRequest method.
//    req, resp := client.GetBucketLifecycleRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycle
func (c *S3) GetBucketLifecycleRequest(input *GetBucketLifecycleInput) (req *request.Request, output *GetBucketLifecycleOutput) {
	if c.Client.Config.Logger != nil {
		c.Client.Config.Logger.Log("This operation, GetBucketLifecycle, has been deprecated")
	}
	op := &request.Operation{
		Name:       opGetBucketLifecycle,
		HTTPMethod: "GET",
		HTTPPath:   "/{Bucket}?lifecycle",
	}

	if input == nil {
		input = &GetBucketLifecycleInput{}
	}

	output = &GetBucketLifecycleOutput{}
	req = c.newRequest(op, input, output)
	return
}

// GetBucketLifecycle API operation for Amazon Simple Storage Service.
//
// Deprecated; see the GetBucketLifecycleConfiguration operation instead.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Simple Storage Service's
// API operation GetBucketLifecycle for usage and error information.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycle
func (c *S3) GetBucketLifecycle(input *GetBucketLifecycleInput) (*GetBucketLifecycleOutput, error) {
	req, out := c.GetBucketLifecycleRequest(input)
	return out, req.Send()
}

// GetBucketLifecycleWithContext is the same as GetBucketLifecycle with the addition of
// the ability to pass a context and additional request options.
//
// See GetBucketLifecycle for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil, a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *S3) GetBucketLifecycleWithContext(ctx aws.Context, input *GetBucketLifecycleInput, opts ...request.Option) (*GetBucketLifecycleOutput, error) {
	req, out := c.GetBucketLifecycleRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}

const opGetBucketLifecycleConfiguration = "GetBucketLifecycleConfiguration"

// GetBucketLifecycleConfigurationRequest generates a "aws/request.Request" representing the
// client's request for the GetBucketLifecycleConfiguration operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// See GetBucketLifecycleConfiguration for usage and error information.
//
// Use this method when you want to inject custom logic into the request's
// lifecycle with a custom handler, or when you need to access properties on
// the request object before or after sending the request. If you just want
// the service response, call the GetBucketLifecycleConfiguration method directly instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
//    // Example sending a request using the GetBucketLifecycleConfigurationRequest method.
//    req, resp := client.GetBucketLifecycleConfigurationRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfiguration
func (c *S3) GetBucketLifecycleConfigurationRequest(input *GetBucketLifecycleConfigurationInput) (req *request.Request, output *GetBucketLifecycleConfigurationOutput) {
	op := &request.Operation{
		Name:       opGetBucketLifecycleConfiguration,
		HTTPMethod: "GET",
		HTTPPath:   "/{Bucket}?lifecycle",
	}

	if input == nil {
		input = &GetBucketLifecycleConfigurationInput{}
	}

	output = &GetBucketLifecycleConfigurationOutput{}
	req = c.newRequest(op, input, output)
	return
}

// GetBucketLifecycleConfiguration API operation for Amazon Simple Storage Service.
//
// Returns the lifecycle configuration information set on the bucket.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Simple Storage Service's
// API operation GetBucketLifecycleConfiguration for usage and error information.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfiguration
func (c *S3) GetBucketLifecycleConfiguration(input *GetBucketLifecycleConfigurationInput) (*GetBucketLifecycleConfigurationOutput, error) {
	req, out := c.GetBucketLifecycleConfigurationRequest(input)
	return out, req.Send()
}

// GetBucketLifecycleConfigurationWithContext is the same as GetBucketLifecycleConfiguration with the addition of
// the ability to pass a context and additional request options.
//
// See GetBucketLifecycleConfiguration for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil, a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *S3) GetBucketLifecycleConfigurationWithContext(ctx aws.Context, input *GetBucketLifecycleConfigurationInput, opts ...request.Option) (*GetBucketLifecycleConfigurationOutput, error) {
	req, out := c.GetBucketLifecycleConfigurationRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}
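
// Editorial sketch, not generated SDK code: using the Request form to inject
// custom logic before the request is sent, the pattern the doc comments above
// describe. svc and the bucket name are placeholders.
//
//    req, out := svc.GetBucketLifecycleConfigurationRequest(&s3.GetBucketLifecycleConfigurationInput{
//        Bucket: aws.String("example-bucket"),
//    })
//    req.Handlers.Send.PushFront(func(r *request.Request) {
//        fmt.Println("about to call", r.HTTPRequest.URL)
//    })
//    if err := req.Send(); err == nil {
//        fmt.Println(len(out.Rules), "lifecycle rules")
//    }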

const opGetBucketLocation = "GetBucketLocation"

// GetBucketLocationRequest generates a "aws/request.Request" representing the
// client's request for the GetBucketLocation operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// See GetBucketLocation for usage and error information.
//
// Use this method when you want to inject custom logic into the request's
// lifecycle with a custom handler, or when you need to access properties on
// the request object before or after sending the request. If you just want
// the service response, call the GetBucketLocation method directly instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
//    // Example sending a request using the GetBucketLocationRequest method.
//    req, resp := client.GetBucketLocationRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocation
func (c *S3) GetBucketLocationRequest(input *GetBucketLocationInput) (req *request.Request, output *GetBucketLocationOutput) {
	op := &request.Operation{
		Name:       opGetBucketLocation,
		HTTPMethod: "GET",
		HTTPPath:   "/{Bucket}?location",
	}

	if input == nil {
		input = &GetBucketLocationInput{}
	}

	output = &GetBucketLocationOutput{}
	req = c.newRequest(op, input, output)
	return
}

// GetBucketLocation API operation for Amazon Simple Storage Service.
//
// Returns the region the bucket resides in.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Simple Storage Service's
// API operation GetBucketLocation for usage and error information.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocation
func (c *S3) GetBucketLocation(input *GetBucketLocationInput) (*GetBucketLocationOutput, error) {
	req, out := c.GetBucketLocationRequest(input)
	return out, req.Send()
}

// GetBucketLocationWithContext is the same as GetBucketLocation with the addition of
// the ability to pass a context and additional request options.
//
// See GetBucketLocation for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil, a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *S3) GetBucketLocationWithContext(ctx aws.Context, input *GetBucketLocationInput, opts ...request.Option) (*GetBucketLocationOutput, error) {
	req, out := c.GetBucketLocationRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}
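
// Editorial sketch, not generated SDK code: resolving a bucket's region. S3
// reports an empty LocationConstraint for buckets in us-east-1, so an empty
// value is conventionally mapped to that region. svc is a placeholder client.
//
//    out, err := svc.GetBucketLocation(&s3.GetBucketLocationInput{
//        Bucket: aws.String("example-bucket"),
//    })
//    if err == nil {
//        region := aws.StringValue(out.LocationConstraint)
//        if region == "" {
//            region = "us-east-1"
//        }
//        fmt.Println("bucket region:", region)
//    }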

const opGetBucketLogging = "GetBucketLogging"

// GetBucketLoggingRequest generates a "aws/request.Request" representing the
// client's request for the GetBucketLogging operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// See GetBucketLogging for usage and error information.
//
// Use this method when you want to inject custom logic into the request's
// lifecycle with a custom handler, or when you need to access properties on
// the request object before or after sending the request. If you just want
// the service response, call the GetBucketLogging method directly instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
//    // Example sending a request using the GetBucketLoggingRequest method.
//    req, resp := client.GetBucketLoggingRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLogging
func (c *S3) GetBucketLoggingRequest(input *GetBucketLoggingInput) (req *request.Request, output *GetBucketLoggingOutput) {
	op := &request.Operation{
		Name:       opGetBucketLogging,
		HTTPMethod: "GET",
		HTTPPath:   "/{Bucket}?logging",
	}

	if input == nil {
		input = &GetBucketLoggingInput{}
	}

	output = &GetBucketLoggingOutput{}
	req = c.newRequest(op, input, output)
	return
}

// GetBucketLogging API operation for Amazon Simple Storage Service.
//
// Returns the logging status of a bucket and the permissions users have to
// view and modify that status. To use GET, you must be the bucket owner.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Simple Storage Service's
// API operation GetBucketLogging for usage and error information.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLogging
func (c *S3) GetBucketLogging(input *GetBucketLoggingInput) (*GetBucketLoggingOutput, error) {
	req, out := c.GetBucketLoggingRequest(input)
	return out, req.Send()
}

// GetBucketLoggingWithContext is the same as GetBucketLogging with the addition of
// the ability to pass a context and additional request options.
//
// See GetBucketLogging for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil, a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *S3) GetBucketLoggingWithContext(ctx aws.Context, input *GetBucketLoggingInput, opts ...request.Option) (*GetBucketLoggingOutput, error) {
	req, out := c.GetBucketLoggingRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}
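
// Editorial sketch, not generated SDK code: checking whether server access
// logging is enabled and where logs are delivered. svc is a placeholder client.
//
//    out, err := svc.GetBucketLogging(&s3.GetBucketLoggingInput{
//        Bucket: aws.String("example-bucket"),
//    })
//    if err == nil && out.LoggingEnabled != nil {
//        fmt.Println("logs go to", aws.StringValue(out.LoggingEnabled.TargetBucket),
//            "with prefix", aws.StringValue(out.LoggingEnabled.TargetPrefix))
//    }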

const opGetBucketMetricsConfiguration = "GetBucketMetricsConfiguration"

// GetBucketMetricsConfigurationRequest generates a "aws/request.Request" representing the
// client's request for the GetBucketMetricsConfiguration operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// See GetBucketMetricsConfiguration for usage and error information.
//
// Use this method when you want to inject custom logic into the request's
// lifecycle with a custom handler, or when you need to access properties on
// the request object before or after sending the request. If you just want
// the service response, call the GetBucketMetricsConfiguration method directly instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
//    // Example sending a request using the GetBucketMetricsConfigurationRequest method.
//    req, resp := client.GetBucketMetricsConfigurationRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketMetricsConfiguration
func (c *S3) GetBucketMetricsConfigurationRequest(input *GetBucketMetricsConfigurationInput) (req *request.Request, output *GetBucketMetricsConfigurationOutput) {
	op := &request.Operation{
		Name:       opGetBucketMetricsConfiguration,
		HTTPMethod: "GET",
		HTTPPath:   "/{Bucket}?metrics",
	}

	if input == nil {
		input = &GetBucketMetricsConfigurationInput{}
	}

	output = &GetBucketMetricsConfigurationOutput{}
	req = c.newRequest(op, input, output)
	return
}

// GetBucketMetricsConfiguration API operation for Amazon Simple Storage Service.
//
// Gets a metrics configuration (specified by the metrics configuration ID)
// from the bucket.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Simple Storage Service's
// API operation GetBucketMetricsConfiguration for usage and error information.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketMetricsConfiguration
func (c *S3) GetBucketMetricsConfiguration(input *GetBucketMetricsConfigurationInput) (*GetBucketMetricsConfigurationOutput, error) {
	req, out := c.GetBucketMetricsConfigurationRequest(input)
	return out, req.Send()
}

// GetBucketMetricsConfigurationWithContext is the same as GetBucketMetricsConfiguration with the addition of
// the ability to pass a context and additional request options.
//
// See GetBucketMetricsConfiguration for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil, a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *S3) GetBucketMetricsConfigurationWithContext(ctx aws.Context, input *GetBucketMetricsConfigurationInput, opts ...request.Option) (*GetBucketMetricsConfigurationOutput, error) {
	req, out := c.GetBucketMetricsConfigurationRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}

const opGetBucketNotification = "GetBucketNotification"

// GetBucketNotificationRequest generates a "aws/request.Request" representing the
// client's request for the GetBucketNotification operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// See GetBucketNotification for usage and error information.
//
// Use this method when you want to inject custom logic into the request's
// lifecycle with a custom handler, or when you need to access properties on
// the request object before or after sending the request. If you just want
// the service response, call the GetBucketNotification method directly instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
//    // Example sending a request using the GetBucketNotificationRequest method.
//    req, resp := client.GetBucketNotificationRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotification
func (c *S3) GetBucketNotificationRequest(input *GetBucketNotificationConfigurationRequest) (req *request.Request, output *NotificationConfigurationDeprecated) {
	if c.Client.Config.Logger != nil {
		c.Client.Config.Logger.Log("This operation, GetBucketNotification, has been deprecated")
	}
	op := &request.Operation{
		Name:       opGetBucketNotification,
		HTTPMethod: "GET",
		HTTPPath:   "/{Bucket}?notification",
	}

	if input == nil {
		input = &GetBucketNotificationConfigurationRequest{}
	}

	output = &NotificationConfigurationDeprecated{}
	req = c.newRequest(op, input, output)
	return
}

// GetBucketNotification API operation for Amazon Simple Storage Service.
//
// Deprecated; see the GetBucketNotificationConfiguration operation instead.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Simple Storage Service's
// API operation GetBucketNotification for usage and error information.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotification
func (c *S3) GetBucketNotification(input *GetBucketNotificationConfigurationRequest) (*NotificationConfigurationDeprecated, error) {
	req, out := c.GetBucketNotificationRequest(input)
	return out, req.Send()
}

// GetBucketNotificationWithContext is the same as GetBucketNotification with the addition of
// the ability to pass a context and additional request options.
//
// See GetBucketNotification for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil, a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *S3) GetBucketNotificationWithContext(ctx aws.Context, input *GetBucketNotificationConfigurationRequest, opts ...request.Option) (*NotificationConfigurationDeprecated, error) {
	req, out := c.GetBucketNotificationRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}

const opGetBucketNotificationConfiguration = "GetBucketNotificationConfiguration"

// GetBucketNotificationConfigurationRequest generates a "aws/request.Request" representing the
// client's request for the GetBucketNotificationConfiguration operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// See GetBucketNotificationConfiguration for usage and error information.
//
// Use this method when you want to inject custom logic into the request's
// lifecycle with a custom handler, or when you need to access properties on
// the request object before or after sending the request. If you just want
// the service response, call the GetBucketNotificationConfiguration method directly instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
//    // Example sending a request using the GetBucketNotificationConfigurationRequest method.
//    req, resp := client.GetBucketNotificationConfigurationRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotificationConfiguration
func (c *S3) GetBucketNotificationConfigurationRequest(input *GetBucketNotificationConfigurationRequest) (req *request.Request, output *NotificationConfiguration) {
	op := &request.Operation{
		Name:       opGetBucketNotificationConfiguration,
		HTTPMethod: "GET",
		HTTPPath:   "/{Bucket}?notification",
	}

	if input == nil {
		input = &GetBucketNotificationConfigurationRequest{}
	}

	output = &NotificationConfiguration{}
	req = c.newRequest(op, input, output)
	return
}

// GetBucketNotificationConfiguration API operation for Amazon Simple Storage Service.
//
// Returns the notification configuration of a bucket.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Simple Storage Service's
// API operation GetBucketNotificationConfiguration for usage and error information.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotificationConfiguration
func (c *S3) GetBucketNotificationConfiguration(input *GetBucketNotificationConfigurationRequest) (*NotificationConfiguration, error) {
	req, out := c.GetBucketNotificationConfigurationRequest(input)
	return out, req.Send()
}

// GetBucketNotificationConfigurationWithContext is the same as GetBucketNotificationConfiguration with the addition of
// the ability to pass a context and additional request options.
//
// See GetBucketNotificationConfiguration for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil, a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *S3) GetBucketNotificationConfigurationWithContext(ctx aws.Context, input *GetBucketNotificationConfigurationRequest, opts ...request.Option) (*NotificationConfiguration, error) {
	req, out := c.GetBucketNotificationConfigurationRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}
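
// Editorial sketch, not generated SDK code: enumerating the notification
// targets configured on a bucket. svc is a placeholder client.
//
//    out, err := svc.GetBucketNotificationConfiguration(&s3.GetBucketNotificationConfigurationRequest{
//        Bucket: aws.String("example-bucket"),
//    })
//    if err == nil {
//        fmt.Println(len(out.TopicConfigurations), "SNS topics,",
//            len(out.QueueConfigurations), "SQS queues,",
//            len(out.LambdaFunctionConfigurations), "Lambda functions")
//    }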

const opGetBucketPolicy = "GetBucketPolicy"

// GetBucketPolicyRequest generates a "aws/request.Request" representing the
// client's request for the GetBucketPolicy operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// See GetBucketPolicy for usage and error information.
//
// Use this method when you want to inject custom logic into the request's
// lifecycle with a custom handler, or when you need to access properties on
// the request object before or after sending the request. If you just want
// the service response, call the GetBucketPolicy method directly instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
//    // Example sending a request using the GetBucketPolicyRequest method.
//    req, resp := client.GetBucketPolicyRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicy
func (c *S3) GetBucketPolicyRequest(input *GetBucketPolicyInput) (req *request.Request, output *GetBucketPolicyOutput) {
	op := &request.Operation{
		Name:       opGetBucketPolicy,
		HTTPMethod: "GET",
		HTTPPath:   "/{Bucket}?policy",
	}

	if input == nil {
		input = &GetBucketPolicyInput{}
	}

	output = &GetBucketPolicyOutput{}
	req = c.newRequest(op, input, output)
	return
}

// GetBucketPolicy API operation for Amazon Simple Storage Service.
//
// Returns the policy of a specified bucket.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Simple Storage Service's
// API operation GetBucketPolicy for usage and error information.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicy
func (c *S3) GetBucketPolicy(input *GetBucketPolicyInput) (*GetBucketPolicyOutput, error) {
	req, out := c.GetBucketPolicyRequest(input)
	return out, req.Send()
}

// GetBucketPolicyWithContext is the same as GetBucketPolicy with the addition of
// the ability to pass a context and additional request options.
//
// See GetBucketPolicy for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil, a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *S3) GetBucketPolicyWithContext(ctx aws.Context, input *GetBucketPolicyInput, opts ...request.Option) (*GetBucketPolicyOutput, error) {
	req, out := c.GetBucketPolicyRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}
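
// Editorial sketch, not generated SDK code: retrieving the bucket policy, which
// the service returns as a JSON document in a string field. svc is a
// placeholder client.
//
//    out, err := svc.GetBucketPolicy(&s3.GetBucketPolicyInput{
//        Bucket: aws.String("example-bucket"),
//    })
//    if err == nil {
//        fmt.Println(aws.StringValue(out.Policy)) // raw JSON policy document
//    }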

const opGetBucketReplication = "GetBucketReplication"

// GetBucketReplicationRequest generates a "aws/request.Request" representing the
// client's request for the GetBucketReplication operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// See GetBucketReplication for usage and error information.
//
// Use this method when you want to inject custom logic into the request's
// lifecycle with a custom handler, or when you need to access properties on
// the request object before or after sending the request. If you just want
// the service response, call the GetBucketReplication method directly instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
//    // Example sending a request using the GetBucketReplicationRequest method.
//    req, resp := client.GetBucketReplicationRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketReplication
func (c *S3) GetBucketReplicationRequest(input *GetBucketReplicationInput) (req *request.Request, output *GetBucketReplicationOutput) {
	op := &request.Operation{
		Name:       opGetBucketReplication,
		HTTPMethod: "GET",
		HTTPPath:   "/{Bucket}?replication",
	}

	if input == nil {
		input = &GetBucketReplicationInput{}
	}

	output = &GetBucketReplicationOutput{}
	req = c.newRequest(op, input, output)
	return
}

// GetBucketReplication API operation for Amazon Simple Storage Service.
//
// Returns the replication configuration of a bucket.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Simple Storage Service's
// API operation GetBucketReplication for usage and error information.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketReplication
func (c *S3) GetBucketReplication(input *GetBucketReplicationInput) (*GetBucketReplicationOutput, error) {
	req, out := c.GetBucketReplicationRequest(input)
	return out, req.Send()
}

// GetBucketReplicationWithContext is the same as GetBucketReplication with the addition of
// the ability to pass a context and additional request options.
//
// See GetBucketReplication for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil, a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *S3) GetBucketReplicationWithContext(ctx aws.Context, input *GetBucketReplicationInput, opts ...request.Option) (*GetBucketReplicationOutput, error) {
	req, out := c.GetBucketReplicationRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}
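
// Editorial sketch, not generated SDK code: inspecting a bucket's replication
// configuration. svc is a placeholder client.
//
//    out, err := svc.GetBucketReplication(&s3.GetBucketReplicationInput{
//        Bucket: aws.String("example-bucket"),
//    })
//    if err == nil && out.ReplicationConfiguration != nil {
//        cfg := out.ReplicationConfiguration
//        fmt.Println("IAM role:", aws.StringValue(cfg.Role), "rules:", len(cfg.Rules))
//    }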

const opGetBucketRequestPayment = "GetBucketRequestPayment"

// GetBucketRequestPaymentRequest generates a "aws/request.Request" representing the
// client's request for the GetBucketRequestPayment operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// See GetBucketRequestPayment for usage and error information.
//
// Use this method when you want to inject custom logic into the request's
// lifecycle with a custom handler, or when you need to access properties on
// the request object before or after sending the request. If you just want
// the service response, call the GetBucketRequestPayment method directly instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
//    // Example sending a request using the GetBucketRequestPaymentRequest method.
//    req, resp := client.GetBucketRequestPaymentRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketRequestPayment
func (c *S3) GetBucketRequestPaymentRequest(input *GetBucketRequestPaymentInput) (req *request.Request, output *GetBucketRequestPaymentOutput) {
	op := &request.Operation{
		Name:       opGetBucketRequestPayment,
		HTTPMethod: "GET",
		HTTPPath:   "/{Bucket}?requestPayment",
	}

	if input == nil {
		input = &GetBucketRequestPaymentInput{}
	}

	output = &GetBucketRequestPaymentOutput{}
	req = c.newRequest(op, input, output)
	return
}

// GetBucketRequestPayment API operation for Amazon Simple Storage Service.
//
// Returns the request payment configuration of a bucket.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Simple Storage Service's
// API operation GetBucketRequestPayment for usage and error information.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketRequestPayment
func (c *S3) GetBucketRequestPayment(input *GetBucketRequestPaymentInput) (*GetBucketRequestPaymentOutput, error) {
	req, out := c.GetBucketRequestPaymentRequest(input)
	return out, req.Send()
}

// GetBucketRequestPaymentWithContext is the same as GetBucketRequestPayment with the addition of
// the ability to pass a context and additional request options.
//
// See GetBucketRequestPayment for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil, a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *S3) GetBucketRequestPaymentWithContext(ctx aws.Context, input *GetBucketRequestPaymentInput, opts ...request.Option) (*GetBucketRequestPaymentOutput, error) {
	req, out := c.GetBucketRequestPaymentRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}

const opGetBucketTagging = "GetBucketTagging"

// GetBucketTaggingRequest generates a "aws/request.Request" representing the
// client's request for the GetBucketTagging operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// See GetBucketTagging for usage and error information.
//
// Use this method when you want to inject custom logic into the request's
// lifecycle with a custom handler, or when you need to access properties on
// the request object before or after sending the request. If you just want
// the service response, call the GetBucketTagging method directly instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
//    // Example sending a request using the GetBucketTaggingRequest method.
//    req, resp := client.GetBucketTaggingRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketTagging
func (c *S3) GetBucketTaggingRequest(input *GetBucketTaggingInput) (req *request.Request, output *GetBucketTaggingOutput) {
	op := &request.Operation{
		Name:       opGetBucketTagging,
		HTTPMethod: "GET",
		HTTPPath:   "/{Bucket}?tagging",
	}

	if input == nil {
		input = &GetBucketTaggingInput{}
	}

	output = &GetBucketTaggingOutput{}
	req = c.newRequest(op, input, output)
	return
}

// GetBucketTagging API operation for Amazon Simple Storage Service.
//
// Returns the tag set associated with the bucket.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Simple Storage Service's
// API operation GetBucketTagging for usage and error information.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketTagging
func (c *S3) GetBucketTagging(input *GetBucketTaggingInput) (*GetBucketTaggingOutput, error) {
	req, out := c.GetBucketTaggingRequest(input)
	return out, req.Send()
}

// GetBucketTaggingWithContext is the same as GetBucketTagging with the addition of
// the ability to pass a context and additional request options.
//
// See GetBucketTagging for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil, a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *S3) GetBucketTaggingWithContext(ctx aws.Context, input *GetBucketTaggingInput, opts ...request.Option) (*GetBucketTaggingOutput, error) {
	req, out := c.GetBucketTaggingRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}
2632
2633const opGetBucketVersioning = "GetBucketVersioning"
2634
2635// GetBucketVersioningRequest generates a "aws/request.Request" representing the
2636// client's request for the GetBucketVersioning operation. The "output" return
2637// value can be used to capture response data after the request's "Send" method
2638// is called.
2639//
2640// See GetBucketVersioning for usage and error information.
2641//
2642// Use this method when you want to inject custom logic into the request's
2643// lifecycle using a custom handler, or when you want to access properties
2644// on the request object before or after sending the request. If you just
2645// want the service response, call the GetBucketVersioning method directly
2646// instead.
2647//
2648// Note: You must call the "Send" method on the returned request object in order
2649// to execute the request.
2650//
2651// // Example sending a request using the GetBucketVersioningRequest method.
2652// req, resp := client.GetBucketVersioningRequest(params)
2653//
2654// err := req.Send()
2655// if err == nil { // resp is now filled
2656// fmt.Println(resp)
2657// }
2658//
2659// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketVersioning
2660func (c *S3) GetBucketVersioningRequest(input *GetBucketVersioningInput) (req *request.Request, output *GetBucketVersioningOutput) {
2661 op := &request.Operation{
2662 Name: opGetBucketVersioning,
2663 HTTPMethod: "GET",
2664 HTTPPath: "/{Bucket}?versioning",
2665 }
2666
2667 if input == nil {
2668 input = &GetBucketVersioningInput{}
2669 }
2670
2671 output = &GetBucketVersioningOutput{}
2672 req = c.newRequest(op, input, output)
2673 return
2674}
2675
2676// GetBucketVersioning API operation for Amazon Simple Storage Service.
2677//
2678// Returns the versioning state of a bucket.
2679//
2680// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
2681// with awserr.Error's Code and Message methods to get detailed information about
2682// the error.
2683//
2684// See the AWS API reference guide for Amazon Simple Storage Service's
2685// API operation GetBucketVersioning for usage and error information.
2686// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketVersioning
2687func (c *S3) GetBucketVersioning(input *GetBucketVersioningInput) (*GetBucketVersioningOutput, error) {
2688 req, out := c.GetBucketVersioningRequest(input)
2689 return out, req.Send()
2690}
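
// A hedged sketch (not part of the generated API) of checking the returned
// versioning state; "svc" is an assumed *S3 client and the bucket name is
// illustrative.
//
//    out, err := svc.GetBucketVersioning(&s3.GetBucketVersioningInput{
//        Bucket: aws.String("my-bucket"), // hypothetical bucket
//    })
//    if err == nil && aws.StringValue(out.Status) == s3.BucketVersioningStatusEnabled {
//        fmt.Println("versioning is enabled")
//    }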
2691
2692// GetBucketVersioningWithContext is the same as GetBucketVersioning with the addition of
2693// the ability to pass a context and additional request options.
2694//
2695// See GetBucketVersioning for details on how to use this API operation.
2696//
2697// The context must be non-nil and will be used for request cancellation. If
2698// the context is nil a panic will occur. In the future the SDK may create
2699// sub-contexts for http.Requests. See https://golang.org/pkg/context/
2700// for more information on using Contexts.
2701func (c *S3) GetBucketVersioningWithContext(ctx aws.Context, input *GetBucketVersioningInput, opts ...request.Option) (*GetBucketVersioningOutput, error) {
2702 req, out := c.GetBucketVersioningRequest(input)
2703 req.SetContext(ctx)
2704 req.ApplyOptions(opts...)
2705 return out, req.Send()
2706}
2707
2708const opGetBucketWebsite = "GetBucketWebsite"
2709
2710// GetBucketWebsiteRequest generates a "aws/request.Request" representing the
2711// client's request for the GetBucketWebsite operation. The "output" return
2712// value can be used to capture response data after the request's "Send" method
2713// is called.
2714//
2715// See GetBucketWebsite for usage and error information.
2716//
2717// Use this method when you want to inject custom logic into the request's
2718// lifecycle using a custom handler, or when you want to access properties
2719// on the request object before or after sending the request. If you just
2720// want the service response, call the GetBucketWebsite method directly
2721// instead.
2722//
2723// Note: You must call the "Send" method on the returned request object in order
2724// to execute the request.
2725//
2726// // Example sending a request using the GetBucketWebsiteRequest method.
2727// req, resp := client.GetBucketWebsiteRequest(params)
2728//
2729// err := req.Send()
2730// if err == nil { // resp is now filled
2731// fmt.Println(resp)
2732// }
2733//
2734// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketWebsite
2735func (c *S3) GetBucketWebsiteRequest(input *GetBucketWebsiteInput) (req *request.Request, output *GetBucketWebsiteOutput) {
2736 op := &request.Operation{
2737 Name: opGetBucketWebsite,
2738 HTTPMethod: "GET",
2739 HTTPPath: "/{Bucket}?website",
2740 }
2741
2742 if input == nil {
2743 input = &GetBucketWebsiteInput{}
2744 }
2745
2746 output = &GetBucketWebsiteOutput{}
2747 req = c.newRequest(op, input, output)
2748 return
2749}
2750
2751// GetBucketWebsite API operation for Amazon Simple Storage Service.
2752//
2753// Returns the website configuration for a bucket.
2754//
2755// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
2756// with awserr.Error's Code and Message methods to get detailed information about
2757// the error.
2758//
2759// See the AWS API reference guide for Amazon Simple Storage Service's
2760// API operation GetBucketWebsite for usage and error information.
2761// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketWebsite
2762func (c *S3) GetBucketWebsite(input *GetBucketWebsiteInput) (*GetBucketWebsiteOutput, error) {
2763 req, out := c.GetBucketWebsiteRequest(input)
2764 return out, req.Send()
2765}
2766
2767// GetBucketWebsiteWithContext is the same as GetBucketWebsite with the addition of
2768// the ability to pass a context and additional request options.
2769//
2770// See GetBucketWebsite for details on how to use this API operation.
2771//
2772// The context must be non-nil and will be used for request cancellation. If
2773// the context is nil a panic will occur. In the future the SDK may create
2774// sub-contexts for http.Requests. See https://golang.org/pkg/context/
2775// for more information on using Contexts.
2776func (c *S3) GetBucketWebsiteWithContext(ctx aws.Context, input *GetBucketWebsiteInput, opts ...request.Option) (*GetBucketWebsiteOutput, error) {
2777 req, out := c.GetBucketWebsiteRequest(input)
2778 req.SetContext(ctx)
2779 req.ApplyOptions(opts...)
2780 return out, req.Send()
2781}
2782
2783const opGetObject = "GetObject"
2784
2785// GetObjectRequest generates a "aws/request.Request" representing the
2786// client's request for the GetObject operation. The "output" return
2787// value can be used to capture response data after the request's "Send" method
2788// is called.
2789//
2790// See GetObject for usage and error information.
2791//
2792// Use this method when you want to inject custom logic into the request's
2793// lifecycle using a custom handler, or when you want to access properties
2794// on the request object before or after sending the request. If you just
2795// want the service response, call the GetObject method directly
2796// instead.
2797//
2798// Note: You must call the "Send" method on the returned request object in order
2799// to execute the request.
2800//
2801// // Example sending a request using the GetObjectRequest method.
2802// req, resp := client.GetObjectRequest(params)
2803//
2804// err := req.Send()
2805// if err == nil { // resp is now filled
2806// fmt.Println(resp)
2807// }
2808//
2809// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObject
2810func (c *S3) GetObjectRequest(input *GetObjectInput) (req *request.Request, output *GetObjectOutput) {
2811 op := &request.Operation{
2812 Name: opGetObject,
2813 HTTPMethod: "GET",
2814 HTTPPath: "/{Bucket}/{Key+}",
2815 }
2816
2817 if input == nil {
2818 input = &GetObjectInput{}
2819 }
2820
2821 output = &GetObjectOutput{}
2822 req = c.newRequest(op, input, output)
2823 return
2824}
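
// A hedged sketch of the custom-handler injection described above; "svc" is an
// assumed *S3 client, the names are illustrative, and the example assumes the
// aws, request, and s3 packages from aws-sdk-go are imported.
//
//    req, out := svc.GetObjectRequest(&s3.GetObjectInput{
//        Bucket: aws.String("my-bucket"), // hypothetical bucket
//        Key:    aws.String("my-key"),    // hypothetical key
//    })
//    req.Handlers.Send.PushFront(func(r *request.Request) {
//        fmt.Println("sending", r.Operation.Name) // runs before the HTTP send
//    })
//    if err := req.Send(); err == nil {
//        fmt.Println(out) // out is populated once Send succeeds
//    }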
2825
2826// GetObject API operation for Amazon Simple Storage Service.
2827//
2828// Retrieves objects from Amazon S3.
2829//
2830// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
2831// with awserr.Error's Code and Message methods to get detailed information about
2832// the error.
2833//
2834// See the AWS API reference guide for Amazon Simple Storage Service's
2835// API operation GetObject for usage and error information.
2836//
2837// Returned Error Codes:
2838// * ErrCodeNoSuchKey "NoSuchKey"
2839// The specified key does not exist.
2840//
2841// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObject
2842func (c *S3) GetObject(input *GetObjectInput) (*GetObjectOutput, error) {
2843 req, out := c.GetObjectRequest(input)
2844 return out, req.Send()
2845}
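
// A hedged sketch of the runtime type assertion described above, assuming the
// awserr package ("github.com/aws/aws-sdk-go/aws/awserr") is imported and
// "svc" is an *S3 client.
//
//    out, err := svc.GetObject(&s3.GetObjectInput{
//        Bucket: aws.String("my-bucket"), // hypothetical bucket
//        Key:    aws.String("missing"),   // hypothetical key
//    })
//    if aerr, ok := err.(awserr.Error); ok && aerr.Code() == s3.ErrCodeNoSuchKey {
//        fmt.Println("no such key:", aerr.Message())
//    } else if err == nil {
//        defer out.Body.Close() // Body is an io.ReadCloser the caller must close
//    }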
2846
2847// GetObjectWithContext is the same as GetObject with the addition of
2848// the ability to pass a context and additional request options.
2849//
2850// See GetObject for details on how to use this API operation.
2851//
2852// The context must be non-nil and will be used for request cancellation. If
2853// the context is nil a panic will occur. In the future the SDK may create
2854// sub-contexts for http.Requests. See https://golang.org/pkg/context/
2855// for more information on using Contexts.
2856func (c *S3) GetObjectWithContext(ctx aws.Context, input *GetObjectInput, opts ...request.Option) (*GetObjectOutput, error) {
2857 req, out := c.GetObjectRequest(input)
2858 req.SetContext(ctx)
2859 req.ApplyOptions(opts...)
2860 return out, req.Send()
2861}
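
// A hedged sketch of passing a cancellation context; context.Context satisfies
// aws.Context, and the example assumes the standard context, io, os, and time
// packages.
//
//    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
//    defer cancel()
//    out, err := svc.GetObjectWithContext(ctx, &s3.GetObjectInput{
//        Bucket: aws.String("my-bucket"), // hypothetical bucket
//        Key:    aws.String("my-key"),    // hypothetical key
//    })
//    if err == nil {
//        defer out.Body.Close()
//        io.Copy(os.Stdout, out.Body) // stream the object body until ctx expires
//    }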
2862
2863const opGetObjectAcl = "GetObjectAcl"
2864
2865// GetObjectAclRequest generates a "aws/request.Request" representing the
2866// client's request for the GetObjectAcl operation. The "output" return
2867// value can be used to capture response data after the request's "Send" method
2868// is called.
2869//
2870// See GetObjectAcl for usage and error information.
2871//
2872// Use this method when you want to inject custom logic into the request's
2873// lifecycle using a custom handler, or when you want to access properties
2874// on the request object before or after sending the request. If you just
2875// want the service response, call the GetObjectAcl method directly
2876// instead.
2877//
2878// Note: You must call the "Send" method on the returned request object in order
2879// to execute the request.
2880//
2881// // Example sending a request using the GetObjectAclRequest method.
2882// req, resp := client.GetObjectAclRequest(params)
2883//
2884// err := req.Send()
2885// if err == nil { // resp is now filled
2886// fmt.Println(resp)
2887// }
2888//
2889// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAcl
2890func (c *S3) GetObjectAclRequest(input *GetObjectAclInput) (req *request.Request, output *GetObjectAclOutput) {
2891 op := &request.Operation{
2892 Name: opGetObjectAcl,
2893 HTTPMethod: "GET",
2894 HTTPPath: "/{Bucket}/{Key+}?acl",
2895 }
2896
2897 if input == nil {
2898 input = &GetObjectAclInput{}
2899 }
2900
2901 output = &GetObjectAclOutput{}
2902 req = c.newRequest(op, input, output)
2903 return
2904}
2905
2906// GetObjectAcl API operation for Amazon Simple Storage Service.
2907//
2908// Returns the access control list (ACL) of an object.
2909//
2910// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
2911// with awserr.Error's Code and Message methods to get detailed information about
2912// the error.
2913//
2914// See the AWS API reference guide for Amazon Simple Storage Service's
2915// API operation GetObjectAcl for usage and error information.
2916//
2917// Returned Error Codes:
2918// * ErrCodeNoSuchKey "NoSuchKey"
2919// The specified key does not exist.
2920//
2921// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAcl
2922func (c *S3) GetObjectAcl(input *GetObjectAclInput) (*GetObjectAclOutput, error) {
2923 req, out := c.GetObjectAclRequest(input)
2924 return out, req.Send()
2925}
2926
2927// GetObjectAclWithContext is the same as GetObjectAcl with the addition of
2928// the ability to pass a context and additional request options.
2929//
2930// See GetObjectAcl for details on how to use this API operation.
2931//
2932// The context must be non-nil and will be used for request cancellation. If
2933// the context is nil a panic will occur. In the future the SDK may create
2934// sub-contexts for http.Requests. See https://golang.org/pkg/context/
2935// for more information on using Contexts.
2936func (c *S3) GetObjectAclWithContext(ctx aws.Context, input *GetObjectAclInput, opts ...request.Option) (*GetObjectAclOutput, error) {
2937 req, out := c.GetObjectAclRequest(input)
2938 req.SetContext(ctx)
2939 req.ApplyOptions(opts...)
2940 return out, req.Send()
2941}
2942
2943const opGetObjectTagging = "GetObjectTagging"
2944
2945// GetObjectTaggingRequest generates a "aws/request.Request" representing the
2946// client's request for the GetObjectTagging operation. The "output" return
2947// value can be used to capture response data after the request's "Send" method
2948// is called.
2949//
2950// See GetObjectTagging for usage and error information.
2951//
2952// Use this method when you want to inject custom logic into the request's
2953// lifecycle using a custom handler, or when you want to access properties
2954// on the request object before or after sending the request. If you just
2955// want the service response, call the GetObjectTagging method directly
2956// instead.
2957//
2958// Note: You must call the "Send" method on the returned request object in order
2959// to execute the request.
2960//
2961// // Example sending a request using the GetObjectTaggingRequest method.
2962// req, resp := client.GetObjectTaggingRequest(params)
2963//
2964// err := req.Send()
2965// if err == nil { // resp is now filled
2966// fmt.Println(resp)
2967// }
2968//
2969// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTagging
2970func (c *S3) GetObjectTaggingRequest(input *GetObjectTaggingInput) (req *request.Request, output *GetObjectTaggingOutput) {
2971 op := &request.Operation{
2972 Name: opGetObjectTagging,
2973 HTTPMethod: "GET",
2974 HTTPPath: "/{Bucket}/{Key+}?tagging",
2975 }
2976
2977 if input == nil {
2978 input = &GetObjectTaggingInput{}
2979 }
2980
2981 output = &GetObjectTaggingOutput{}
2982 req = c.newRequest(op, input, output)
2983 return
2984}
2985
2986// GetObjectTagging API operation for Amazon Simple Storage Service.
2987//
2988// Returns the tag-set of an object.
2989//
2990// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
2991// with awserr.Error's Code and Message methods to get detailed information about
2992// the error.
2993//
2994// See the AWS API reference guide for Amazon Simple Storage Service's
2995// API operation GetObjectTagging for usage and error information.
2996// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTagging
2997func (c *S3) GetObjectTagging(input *GetObjectTaggingInput) (*GetObjectTaggingOutput, error) {
2998 req, out := c.GetObjectTaggingRequest(input)
2999 return out, req.Send()
3000}
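
// A hedged sketch of reading the returned tag-set; "svc" is an assumed *S3
// client and the names are illustrative.
//
//    out, err := svc.GetObjectTagging(&s3.GetObjectTaggingInput{
//        Bucket: aws.String("my-bucket"), // hypothetical bucket
//        Key:    aws.String("my-key"),    // hypothetical key
//    })
//    if err == nil {
//        for _, t := range out.TagSet {
//            fmt.Printf("%s=%s\n", aws.StringValue(t.Key), aws.StringValue(t.Value))
//        }
//    }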
3001
3002// GetObjectTaggingWithContext is the same as GetObjectTagging with the addition of
3003// the ability to pass a context and additional request options.
3004//
3005// See GetObjectTagging for details on how to use this API operation.
3006//
3007// The context must be non-nil and will be used for request cancellation. If
3008// the context is nil a panic will occur. In the future the SDK may create
3009// sub-contexts for http.Requests. See https://golang.org/pkg/context/
3010// for more information on using Contexts.
3011func (c *S3) GetObjectTaggingWithContext(ctx aws.Context, input *GetObjectTaggingInput, opts ...request.Option) (*GetObjectTaggingOutput, error) {
3012 req, out := c.GetObjectTaggingRequest(input)
3013 req.SetContext(ctx)
3014 req.ApplyOptions(opts...)
3015 return out, req.Send()
3016}
3017
3018const opGetObjectTorrent = "GetObjectTorrent"
3019
3020// GetObjectTorrentRequest generates a "aws/request.Request" representing the
3021// client's request for the GetObjectTorrent operation. The "output" return
3022// value can be used to capture response data after the request's "Send" method
3023// is called.
3024//
3025// See GetObjectTorrent for usage and error information.
3026//
3027// Use this method when you want to inject custom logic into the request's
3028// lifecycle using a custom handler, or when you want to access properties
3029// on the request object before or after sending the request. If you just
3030// want the service response, call the GetObjectTorrent method directly
3031// instead.
3032//
3033// Note: You must call the "Send" method on the returned request object in order
3034// to execute the request.
3035//
3036// // Example sending a request using the GetObjectTorrentRequest method.
3037// req, resp := client.GetObjectTorrentRequest(params)
3038//
3039// err := req.Send()
3040// if err == nil { // resp is now filled
3041// fmt.Println(resp)
3042// }
3043//
3044// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTorrent
3045func (c *S3) GetObjectTorrentRequest(input *GetObjectTorrentInput) (req *request.Request, output *GetObjectTorrentOutput) {
3046 op := &request.Operation{
3047 Name: opGetObjectTorrent,
3048 HTTPMethod: "GET",
3049 HTTPPath: "/{Bucket}/{Key+}?torrent",
3050 }
3051
3052 if input == nil {
3053 input = &GetObjectTorrentInput{}
3054 }
3055
3056 output = &GetObjectTorrentOutput{}
3057 req = c.newRequest(op, input, output)
3058 return
3059}
3060
3061// GetObjectTorrent API operation for Amazon Simple Storage Service.
3062//
3063// Returns torrent files from a bucket.
3064//
3065// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
3066// with awserr.Error's Code and Message methods to get detailed information about
3067// the error.
3068//
3069// See the AWS API reference guide for Amazon Simple Storage Service's
3070// API operation GetObjectTorrent for usage and error information.
3071// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTorrent
3072func (c *S3) GetObjectTorrent(input *GetObjectTorrentInput) (*GetObjectTorrentOutput, error) {
3073 req, out := c.GetObjectTorrentRequest(input)
3074 return out, req.Send()
3075}
3076
3077// GetObjectTorrentWithContext is the same as GetObjectTorrent with the addition of
3078// the ability to pass a context and additional request options.
3079//
3080// See GetObjectTorrent for details on how to use this API operation.
3081//
3082// The context must be non-nil and will be used for request cancellation. If
3083// the context is nil a panic will occur. In the future the SDK may create
3084// sub-contexts for http.Requests. See https://golang.org/pkg/context/
3085// for more information on using Contexts.
3086func (c *S3) GetObjectTorrentWithContext(ctx aws.Context, input *GetObjectTorrentInput, opts ...request.Option) (*GetObjectTorrentOutput, error) {
3087 req, out := c.GetObjectTorrentRequest(input)
3088 req.SetContext(ctx)
3089 req.ApplyOptions(opts...)
3090 return out, req.Send()
3091}
3092
3093const opHeadBucket = "HeadBucket"
3094
3095// HeadBucketRequest generates a "aws/request.Request" representing the
3096// client's request for the HeadBucket operation. The "output" return
3097// value can be used to capture response data after the request's "Send" method
3098// is called.
3099//
3100// See HeadBucket for usage and error information.
3101//
3102// Use this method when you want to inject custom logic into the request's
3103// lifecycle using a custom handler, or when you want to access properties
3104// on the request object before or after sending the request. If you just
3105// want the service response, call the HeadBucket method directly
3106// instead.
3107//
3108// Note: You must call the "Send" method on the returned request object in order
3109// to execute the request.
3110//
3111// // Example sending a request using the HeadBucketRequest method.
3112// req, resp := client.HeadBucketRequest(params)
3113//
3114// err := req.Send()
3115// if err == nil { // resp is now filled
3116// fmt.Println(resp)
3117// }
3118//
3119// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucket
3120func (c *S3) HeadBucketRequest(input *HeadBucketInput) (req *request.Request, output *HeadBucketOutput) {
3121 op := &request.Operation{
3122 Name: opHeadBucket,
3123 HTTPMethod: "HEAD",
3124 HTTPPath: "/{Bucket}",
3125 }
3126
3127 if input == nil {
3128 input = &HeadBucketInput{}
3129 }
3130
3131 output = &HeadBucketOutput{}
3132 req = c.newRequest(op, input, output)
3133 req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
3134 req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
3135 return
3136}
3137
3138// HeadBucket API operation for Amazon Simple Storage Service.
3139//
3140// This operation is useful for determining whether a bucket exists and whether
3141// you have permission to access it.
3142//
3143// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
3144// with awserr.Error's Code and Message methods to get detailed information about
3145// the error.
3146//
3147// See the AWS API reference guide for Amazon Simple Storage Service's
3148// API operation HeadBucket for usage and error information.
3149//
3150// Returned Error Codes:
3151// * ErrCodeNoSuchBucket "NoSuchBucket"
3152// The specified bucket does not exist.
3153//
3154// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucket
3155func (c *S3) HeadBucket(input *HeadBucketInput) (*HeadBucketOutput, error) {
3156 req, out := c.HeadBucketRequest(input)
3157 return out, req.Send()
3158}
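
// A hedged sketch of the existence check described above; "svc" is an assumed
// *S3 client. Note that HEAD responses carry no error body, so the service may
// also surface a bare "NotFound" code rather than ErrCodeNoSuchBucket.
//
//    _, err := svc.HeadBucket(&s3.HeadBucketInput{
//        Bucket: aws.String("maybe-missing"), // hypothetical bucket
//    })
//    if aerr, ok := err.(awserr.Error); ok && aerr.Code() == s3.ErrCodeNoSuchBucket {
//        fmt.Println("bucket does not exist")
//    }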
3159
3160// HeadBucketWithContext is the same as HeadBucket with the addition of
3161// the ability to pass a context and additional request options.
3162//
3163// See HeadBucket for details on how to use this API operation.
3164//
3165// The context must be non-nil and will be used for request cancellation. If
3166// the context is nil a panic will occur. In the future the SDK may create
3167// sub-contexts for http.Requests. See https://golang.org/pkg/context/
3168// for more information on using Contexts.
3169func (c *S3) HeadBucketWithContext(ctx aws.Context, input *HeadBucketInput, opts ...request.Option) (*HeadBucketOutput, error) {
3170 req, out := c.HeadBucketRequest(input)
3171 req.SetContext(ctx)
3172 req.ApplyOptions(opts...)
3173 return out, req.Send()
3174}
3175
3176const opHeadObject = "HeadObject"
3177
3178// HeadObjectRequest generates a "aws/request.Request" representing the
3179// client's request for the HeadObject operation. The "output" return
3180// value can be used to capture response data after the request's "Send" method
3181// is called.
3182//
3183// See HeadObject for usage and error information.
3184//
3185// Use this method when you want to inject custom logic into the request's
3186// lifecycle using a custom handler, or when you want to access properties
3187// on the request object before or after sending the request. If you just
3188// want the service response, call the HeadObject method directly
3189// instead.
3190//
3191// Note: You must call the "Send" method on the returned request object in order
3192// to execute the request.
3193//
3194// // Example sending a request using the HeadObjectRequest method.
3195// req, resp := client.HeadObjectRequest(params)
3196//
3197// err := req.Send()
3198// if err == nil { // resp is now filled
3199// fmt.Println(resp)
3200// }
3201//
3202// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObject
3203func (c *S3) HeadObjectRequest(input *HeadObjectInput) (req *request.Request, output *HeadObjectOutput) {
3204 op := &request.Operation{
3205 Name: opHeadObject,
3206 HTTPMethod: "HEAD",
3207 HTTPPath: "/{Bucket}/{Key+}",
3208 }
3209
3210 if input == nil {
3211 input = &HeadObjectInput{}
3212 }
3213
3214 output = &HeadObjectOutput{}
3215 req = c.newRequest(op, input, output)
3216 return
3217}
3218
3219// HeadObject API operation for Amazon Simple Storage Service.
3220//
3221// The HEAD operation retrieves metadata from an object without returning the
3222// object itself. This operation is useful if you're only interested in an object's
3223// metadata. To use HEAD, you must have READ access to the object.
3224//
3225// See http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#RESTErrorResponses
3226// for more information on returned errors.
3227//
3228// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
3229// with awserr.Error's Code and Message methods to get detailed information about
3230// the error.
3231//
3232// See the AWS API reference guide for Amazon Simple Storage Service's
3233// API operation HeadObject for usage and error information.
3234// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObject
3235func (c *S3) HeadObject(input *HeadObjectInput) (*HeadObjectOutput, error) {
3236 req, out := c.HeadObjectRequest(input)
3237 return out, req.Send()
3238}
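
// A hedged sketch of inspecting metadata without fetching the body; "svc" is
// an assumed *S3 client and the names are illustrative.
//
//    out, err := svc.HeadObject(&s3.HeadObjectInput{
//        Bucket: aws.String("my-bucket"), // hypothetical bucket
//        Key:    aws.String("my-key"),    // hypothetical key
//    })
//    if err == nil {
//        fmt.Println(aws.Int64Value(out.ContentLength), aws.StringValue(out.ContentType))
//    }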
3239
3240// HeadObjectWithContext is the same as HeadObject with the addition of
3241// the ability to pass a context and additional request options.
3242//
3243// See HeadObject for details on how to use this API operation.
3244//
3245// The context must be non-nil and will be used for request cancellation. If
3246// the context is nil a panic will occur. In the future the SDK may create
3247// sub-contexts for http.Requests. See https://golang.org/pkg/context/
3248// for more information on using Contexts.
3249func (c *S3) HeadObjectWithContext(ctx aws.Context, input *HeadObjectInput, opts ...request.Option) (*HeadObjectOutput, error) {
3250 req, out := c.HeadObjectRequest(input)
3251 req.SetContext(ctx)
3252 req.ApplyOptions(opts...)
3253 return out, req.Send()
3254}
3255
3256const opListBucketAnalyticsConfigurations = "ListBucketAnalyticsConfigurations"
3257
3258// ListBucketAnalyticsConfigurationsRequest generates a "aws/request.Request" representing the
3259// client's request for the ListBucketAnalyticsConfigurations operation. The "output" return
3260// value can be used to capture response data after the request's "Send" method
3261// is called.
3262//
3263// See ListBucketAnalyticsConfigurations for usage and error information.
3264//
3265// Use this method when you want to inject custom logic into the request's
3266// lifecycle using a custom handler, or when you want to access properties
3267// on the request object before or after sending the request. If you just
3268// want the service response, call the ListBucketAnalyticsConfigurations method directly
3269// instead.
3270//
3271// Note: You must call the "Send" method on the returned request object in order
3272// to execute the request.
3273//
3274// // Example sending a request using the ListBucketAnalyticsConfigurationsRequest method.
3275// req, resp := client.ListBucketAnalyticsConfigurationsRequest(params)
3276//
3277// err := req.Send()
3278// if err == nil { // resp is now filled
3279// fmt.Println(resp)
3280// }
3281//
3282// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketAnalyticsConfigurations
3283func (c *S3) ListBucketAnalyticsConfigurationsRequest(input *ListBucketAnalyticsConfigurationsInput) (req *request.Request, output *ListBucketAnalyticsConfigurationsOutput) {
3284 op := &request.Operation{
3285 Name: opListBucketAnalyticsConfigurations,
3286 HTTPMethod: "GET",
3287 HTTPPath: "/{Bucket}?analytics",
3288 }
3289
3290 if input == nil {
3291 input = &ListBucketAnalyticsConfigurationsInput{}
3292 }
3293
3294 output = &ListBucketAnalyticsConfigurationsOutput{}
3295 req = c.newRequest(op, input, output)
3296 return
3297}
3298
3299// ListBucketAnalyticsConfigurations API operation for Amazon Simple Storage Service.
3300//
3301// Lists the analytics configurations for the bucket.
3302//
3303// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
3304// with awserr.Error's Code and Message methods to get detailed information about
3305// the error.
3306//
3307// See the AWS API reference guide for Amazon Simple Storage Service's
3308// API operation ListBucketAnalyticsConfigurations for usage and error information.
3309// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketAnalyticsConfigurations
3310func (c *S3) ListBucketAnalyticsConfigurations(input *ListBucketAnalyticsConfigurationsInput) (*ListBucketAnalyticsConfigurationsOutput, error) {
3311 req, out := c.ListBucketAnalyticsConfigurationsRequest(input)
3312 return out, req.Send()
3313}
3314
3315// ListBucketAnalyticsConfigurationsWithContext is the same as ListBucketAnalyticsConfigurations with the addition of
3316// the ability to pass a context and additional request options.
3317//
3318// See ListBucketAnalyticsConfigurations for details on how to use this API operation.
3319//
3320// The context must be non-nil and will be used for request cancellation. If
3321// the context is nil a panic will occur. In the future the SDK may create
3322// sub-contexts for http.Requests. See https://golang.org/pkg/context/
3323// for more information on using Contexts.
3324func (c *S3) ListBucketAnalyticsConfigurationsWithContext(ctx aws.Context, input *ListBucketAnalyticsConfigurationsInput, opts ...request.Option) (*ListBucketAnalyticsConfigurationsOutput, error) {
3325 req, out := c.ListBucketAnalyticsConfigurationsRequest(input)
3326 req.SetContext(ctx)
3327 req.ApplyOptions(opts...)
3328 return out, req.Send()
3329}
3330
3331const opListBucketInventoryConfigurations = "ListBucketInventoryConfigurations"
3332
3333// ListBucketInventoryConfigurationsRequest generates a "aws/request.Request" representing the
3334// client's request for the ListBucketInventoryConfigurations operation. The "output" return
3335// value can be used to capture response data after the request's "Send" method
3336// is called.
3337//
3338// See ListBucketInventoryConfigurations for usage and error information.
3339//
3340// Use this method when you want to inject custom logic into the request's
3341// lifecycle using a custom handler, or when you want to access properties
3342// on the request object before or after sending the request. If you just
3343// want the service response, call the ListBucketInventoryConfigurations method directly
3344// instead.
3345//
3346// Note: You must call the "Send" method on the returned request object in order
3347// to execute the request.
3348//
3349// // Example sending a request using the ListBucketInventoryConfigurationsRequest method.
3350// req, resp := client.ListBucketInventoryConfigurationsRequest(params)
3351//
3352// err := req.Send()
3353// if err == nil { // resp is now filled
3354// fmt.Println(resp)
3355// }
3356//
3357// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketInventoryConfigurations
3358func (c *S3) ListBucketInventoryConfigurationsRequest(input *ListBucketInventoryConfigurationsInput) (req *request.Request, output *ListBucketInventoryConfigurationsOutput) {
3359 op := &request.Operation{
3360 Name: opListBucketInventoryConfigurations,
3361 HTTPMethod: "GET",
3362 HTTPPath: "/{Bucket}?inventory",
3363 }
3364
3365 if input == nil {
3366 input = &ListBucketInventoryConfigurationsInput{}
3367 }
3368
3369 output = &ListBucketInventoryConfigurationsOutput{}
3370 req = c.newRequest(op, input, output)
3371 return
3372}
3373
3374// ListBucketInventoryConfigurations API operation for Amazon Simple Storage Service.
3375//
3376// Returns a list of inventory configurations for the bucket.
3377//
3378// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
3379// with awserr.Error's Code and Message methods to get detailed information about
3380// the error.
3381//
3382// See the AWS API reference guide for Amazon Simple Storage Service's
3383// API operation ListBucketInventoryConfigurations for usage and error information.
3384// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketInventoryConfigurations
3385func (c *S3) ListBucketInventoryConfigurations(input *ListBucketInventoryConfigurationsInput) (*ListBucketInventoryConfigurationsOutput, error) {
3386 req, out := c.ListBucketInventoryConfigurationsRequest(input)
3387 return out, req.Send()
3388}
3389
3390// ListBucketInventoryConfigurationsWithContext is the same as ListBucketInventoryConfigurations with the addition of
3391// the ability to pass a context and additional request options.
3392//
3393// See ListBucketInventoryConfigurations for details on how to use this API operation.
3394//
3395// The context must be non-nil and will be used for request cancellation. If
3396// the context is nil a panic will occur. In the future the SDK may create
3397// sub-contexts for http.Requests. See https://golang.org/pkg/context/
3398// for more information on using Contexts.
3399func (c *S3) ListBucketInventoryConfigurationsWithContext(ctx aws.Context, input *ListBucketInventoryConfigurationsInput, opts ...request.Option) (*ListBucketInventoryConfigurationsOutput, error) {
3400 req, out := c.ListBucketInventoryConfigurationsRequest(input)
3401 req.SetContext(ctx)
3402 req.ApplyOptions(opts...)
3403 return out, req.Send()
3404}
3405
3406const opListBucketMetricsConfigurations = "ListBucketMetricsConfigurations"
3407
3408// ListBucketMetricsConfigurationsRequest generates a "aws/request.Request" representing the
3409// client's request for the ListBucketMetricsConfigurations operation. The "output" return
3410// value can be used to capture response data after the request's "Send" method
3411// is called.
3412//
3413// See ListBucketMetricsConfigurations for usage and error information.
3414//
3415// Use this method when you want to inject custom logic into the request's
3416// lifecycle using a custom handler, or when you want to access properties
3417// on the request object before or after sending the request. If you just
3418// want the service response, call the ListBucketMetricsConfigurations method directly
3419// instead.
3420//
3421// Note: You must call the "Send" method on the returned request object in order
3422// to execute the request.
3423//
3424// // Example sending a request using the ListBucketMetricsConfigurationsRequest method.
3425// req, resp := client.ListBucketMetricsConfigurationsRequest(params)
3426//
3427// err := req.Send()
3428// if err == nil { // resp is now filled
3429// fmt.Println(resp)
3430// }
3431//
3432// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketMetricsConfigurations
3433func (c *S3) ListBucketMetricsConfigurationsRequest(input *ListBucketMetricsConfigurationsInput) (req *request.Request, output *ListBucketMetricsConfigurationsOutput) {
3434 op := &request.Operation{
3435 Name: opListBucketMetricsConfigurations,
3436 HTTPMethod: "GET",
3437 HTTPPath: "/{Bucket}?metrics",
3438 }
3439
3440 if input == nil {
3441 input = &ListBucketMetricsConfigurationsInput{}
3442 }
3443
3444 output = &ListBucketMetricsConfigurationsOutput{}
3445 req = c.newRequest(op, input, output)
3446 return
3447}
3448
3449// ListBucketMetricsConfigurations API operation for Amazon Simple Storage Service.
3450//
3451// Lists the metrics configurations for the bucket.
3452//
3453// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
3454// with awserr.Error's Code and Message methods to get detailed information about
3455// the error.
3456//
3457// See the AWS API reference guide for Amazon Simple Storage Service's
3458// API operation ListBucketMetricsConfigurations for usage and error information.
3459// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketMetricsConfigurations
3460func (c *S3) ListBucketMetricsConfigurations(input *ListBucketMetricsConfigurationsInput) (*ListBucketMetricsConfigurationsOutput, error) {
3461 req, out := c.ListBucketMetricsConfigurationsRequest(input)
3462 return out, req.Send()
3463}
3464
3465// ListBucketMetricsConfigurationsWithContext is the same as ListBucketMetricsConfigurations with the addition of
3466// the ability to pass a context and additional request options.
3467//
3468// See ListBucketMetricsConfigurations for details on how to use this API operation.
3469//
3470// The context must be non-nil and will be used for request cancellation. If
3471// the context is nil a panic will occur. In the future the SDK may create
3472// sub-contexts for http.Requests. See https://golang.org/pkg/context/
3473// for more information on using Contexts.
3474func (c *S3) ListBucketMetricsConfigurationsWithContext(ctx aws.Context, input *ListBucketMetricsConfigurationsInput, opts ...request.Option) (*ListBucketMetricsConfigurationsOutput, error) {
3475 req, out := c.ListBucketMetricsConfigurationsRequest(input)
3476 req.SetContext(ctx)
3477 req.ApplyOptions(opts...)
3478 return out, req.Send()
3479}
3480
3481const opListBuckets = "ListBuckets"
3482
3483// ListBucketsRequest generates a "aws/request.Request" representing the
3484// client's request for the ListBuckets operation. The "output" return
3485// value can be used to capture response data after the request's "Send" method
3486// is called.
3487//
3488// See ListBuckets for usage and error information.
3489//
3490// Use this method when you want to inject custom logic into the request's
3491// lifecycle using a custom handler, or when you want to access properties
3492// on the request object before or after sending the request. If you just
3493// want the service response, call the ListBuckets method directly
3494// instead.
3495//
3496// Note: You must call the "Send" method on the returned request object in order
3497// to execute the request.
3498//
3499// // Example sending a request using the ListBucketsRequest method.
3500// req, resp := client.ListBucketsRequest(params)
3501//
3502// err := req.Send()
3503// if err == nil { // resp is now filled
3504// fmt.Println(resp)
3505// }
3506//
3507// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBuckets
3508func (c *S3) ListBucketsRequest(input *ListBucketsInput) (req *request.Request, output *ListBucketsOutput) {
3509 op := &request.Operation{
3510 Name: opListBuckets,
3511 HTTPMethod: "GET",
3512 HTTPPath: "/",
3513 }
3514
3515 if input == nil {
3516 input = &ListBucketsInput{}
3517 }
3518
3519 output = &ListBucketsOutput{}
3520 req = c.newRequest(op, input, output)
3521 return
3522}
3523
3524// ListBuckets API operation for Amazon Simple Storage Service.
3525//
3526// Returns a list of all buckets owned by the authenticated sender of the request.
3527//
3528// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
3529// with awserr.Error's Code and Message methods to get detailed information about
3530// the error.
3531//
3532// See the AWS API reference guide for Amazon Simple Storage Service's
3533// API operation ListBuckets for usage and error information.
3534// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBuckets
3535func (c *S3) ListBuckets(input *ListBucketsInput) (*ListBucketsOutput, error) {
3536 req, out := c.ListBucketsRequest(input)
3537 return out, req.Send()
3538}
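
// A hedged sketch of enumerating the caller's buckets; "svc" is an assumed
// *S3 client.
//
//    out, err := svc.ListBuckets(&s3.ListBucketsInput{})
//    if err == nil {
//        for _, b := range out.Buckets {
//            fmt.Println(aws.StringValue(b.Name), aws.TimeValue(b.CreationDate))
//        }
//    }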
3539
3540// ListBucketsWithContext is the same as ListBuckets with the addition of
3541// the ability to pass a context and additional request options.
3542//
3543// See ListBuckets for details on how to use this API operation.
3544//
3545// The context must be non-nil and will be used for request cancellation. If
3546// the context is nil a panic will occur. In the future the SDK may create
3547// sub-contexts for http.Requests. See https://golang.org/pkg/context/
3548// for more information on using Contexts.
3549func (c *S3) ListBucketsWithContext(ctx aws.Context, input *ListBucketsInput, opts ...request.Option) (*ListBucketsOutput, error) {
3550 req, out := c.ListBucketsRequest(input)
3551 req.SetContext(ctx)
3552 req.ApplyOptions(opts...)
3553 return out, req.Send()
3554}
3555
3556const opListMultipartUploads = "ListMultipartUploads"
3557
3558// ListMultipartUploadsRequest generates a "aws/request.Request" representing the
3559// client's request for the ListMultipartUploads operation. The "output" return
3560// value can be used to capture response data after the request's "Send" method
3561// is called.
3562//
3563// See ListMultipartUploads for usage and error information.
3564//
3565// Use this method when you want to inject custom logic into the request's
3566// lifecycle using a custom handler, or when you want to access properties
3567// on the request object before or after sending the request. If you just
3568// want the service response, call the ListMultipartUploads method directly
3569// instead.
3570//
3571// Note: You must call the "Send" method on the returned request object in order
3572// to execute the request.
3573//
3574// // Example sending a request using the ListMultipartUploadsRequest method.
3575// req, resp := client.ListMultipartUploadsRequest(params)
3576//
3577// err := req.Send()
3578// if err == nil { // resp is now filled
3579// fmt.Println(resp)
3580// }
3581//
3582// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploads
3583func (c *S3) ListMultipartUploadsRequest(input *ListMultipartUploadsInput) (req *request.Request, output *ListMultipartUploadsOutput) {
3584 op := &request.Operation{
3585 Name: opListMultipartUploads,
3586 HTTPMethod: "GET",
3587 HTTPPath: "/{Bucket}?uploads",
3588 Paginator: &request.Paginator{
3589 InputTokens: []string{"KeyMarker", "UploadIdMarker"},
3590 OutputTokens: []string{"NextKeyMarker", "NextUploadIdMarker"},
3591 LimitToken: "MaxUploads",
3592 TruncationToken: "IsTruncated",
3593 },
3594 }
3595
3596 if input == nil {
3597 input = &ListMultipartUploadsInput{}
3598 }
3599
3600 output = &ListMultipartUploadsOutput{}
3601 req = c.newRequest(op, input, output)
3602 return
3603}
3604
3605// ListMultipartUploads API operation for Amazon Simple Storage Service.
3606//
3607// This operation lists in-progress multipart uploads.
3608//
3609// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
3610// with awserr.Error's Code and Message methods to get detailed information about
3611// the error.
3612//
3613// See the AWS API reference guide for Amazon Simple Storage Service's
3614// API operation ListMultipartUploads for usage and error information.
3615// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploads
3616func (c *S3) ListMultipartUploads(input *ListMultipartUploadsInput) (*ListMultipartUploadsOutput, error) {
3617 req, out := c.ListMultipartUploadsRequest(input)
3618 return out, req.Send()
3619}
3620
3621// ListMultipartUploadsWithContext is the same as ListMultipartUploads with the addition of
3622// the ability to pass a context and additional request options.
3623//
3624// See ListMultipartUploads for details on how to use this API operation.
3625//
3626// The context must be non-nil and will be used for request cancellation. If
3627// the context is nil a panic will occur. In the future the SDK may create
3628// sub-contexts for http.Requests. See https://golang.org/pkg/context/
3629// for more information on using Contexts.
3630func (c *S3) ListMultipartUploadsWithContext(ctx aws.Context, input *ListMultipartUploadsInput, opts ...request.Option) (*ListMultipartUploadsOutput, error) {
3631 req, out := c.ListMultipartUploadsRequest(input)
3632 req.SetContext(ctx)
3633 req.ApplyOptions(opts...)
3634 return out, req.Send()
3635}
3636
3637// ListMultipartUploadsPages iterates over the pages of a ListMultipartUploads operation,
3638// calling the "fn" function with the response data for each page. To stop
3639// iterating, return false from the fn function.
3640//
3641// See ListMultipartUploads method for more information on how to use this operation.
3642//
3643// Note: This operation can generate multiple requests to a service.
3644//
3645// // Example iterating over at most 3 pages of a ListMultipartUploads operation.
3646// pageNum := 0
3647// err := client.ListMultipartUploadsPages(params,
3648// func(page *ListMultipartUploadsOutput, lastPage bool) bool {
3649// pageNum++
3650// fmt.Println(page)
3651// return pageNum <= 3
3652// })
3653//
3654func (c *S3) ListMultipartUploadsPages(input *ListMultipartUploadsInput, fn func(*ListMultipartUploadsOutput, bool) bool) error {
3655 return c.ListMultipartUploadsPagesWithContext(aws.BackgroundContext(), input, fn)
3656}
3657
3658// ListMultipartUploadsPagesWithContext same as ListMultipartUploadsPages except
3659// it takes a Context and allows setting request options on the pages.
3660//
3661// The context must be non-nil and will be used for request cancellation. If
3662// the context is nil a panic will occur. In the future the SDK may create
3663// sub-contexts for http.Requests. See https://golang.org/pkg/context/
3664// for more information on using Contexts.
3665func (c *S3) ListMultipartUploadsPagesWithContext(ctx aws.Context, input *ListMultipartUploadsInput, fn func(*ListMultipartUploadsOutput, bool) bool, opts ...request.Option) error {
3666 p := request.Pagination{
3667 NewRequest: func() (*request.Request, error) {
3668 var inCpy *ListMultipartUploadsInput
3669 if input != nil {
3670 tmp := *input
3671 inCpy = &tmp
3672 }
3673 req, _ := c.ListMultipartUploadsRequest(inCpy)
3674 req.SetContext(ctx)
3675 req.ApplyOptions(opts...)
3676 return req, nil
3677 },
3678 }
3679
3680 cont := true
3681 for p.Next() && cont {
3682 cont = fn(p.Page().(*ListMultipartUploadsOutput), !p.HasNextPage())
3683 }
3684 return p.Err()
3685}
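
// A hedged sketch combining pagination with a cancellation context; assumes
// the standard context and time packages, and "svc" as an *S3 client.
//
//    ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
//    defer cancel()
//    err := svc.ListMultipartUploadsPagesWithContext(ctx, &s3.ListMultipartUploadsInput{
//        Bucket: aws.String("my-bucket"), // hypothetical bucket
//    }, func(page *s3.ListMultipartUploadsOutput, lastPage bool) bool {
//        fmt.Println(len(page.Uploads), "in-progress uploads on this page")
//        return true // keep paging until the last page or ctx expires
//    })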
3686
3687const opListObjectVersions = "ListObjectVersions"
3688
3689// ListObjectVersionsRequest generates a "aws/request.Request" representing the
3690// client's request for the ListObjectVersions operation. The "output" return
3691// value can be used to capture response data after the request's "Send" method
3692// is called.
3693//
3694// See ListObjectVersions for usage and error information.
3695//
3696// Use this method when you want to inject custom logic into the request's
3697// lifecycle using a custom handler, or when you want to access properties
3698// on the request object before or after sending the request. If you just
3699// want the service response, call the ListObjectVersions method directly
3700// instead.
3701//
3702// Note: You must call the "Send" method on the returned request object in order
3703// to execute the request.
3704//
3705// // Example sending a request using the ListObjectVersionsRequest method.
3706// req, resp := client.ListObjectVersionsRequest(params)
3707//
3708// err := req.Send()
3709// if err == nil { // resp is now filled
3710// fmt.Println(resp)
3711// }
3712//
3713// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersions
3714func (c *S3) ListObjectVersionsRequest(input *ListObjectVersionsInput) (req *request.Request, output *ListObjectVersionsOutput) {
3715 op := &request.Operation{
3716 Name: opListObjectVersions,
3717 HTTPMethod: "GET",
3718 HTTPPath: "/{Bucket}?versions",
3719 Paginator: &request.Paginator{
3720 InputTokens: []string{"KeyMarker", "VersionIdMarker"},
3721 OutputTokens: []string{"NextKeyMarker", "NextVersionIdMarker"},
3722 LimitToken: "MaxKeys",
3723 TruncationToken: "IsTruncated",
3724 },
3725 }
3726
3727 if input == nil {
3728 input = &ListObjectVersionsInput{}
3729 }
3730
3731 output = &ListObjectVersionsOutput{}
3732 req = c.newRequest(op, input, output)
3733 return
3734}
3735
3736// ListObjectVersions API operation for Amazon Simple Storage Service.
3737//
3738// Returns metadata about all of the versions of objects in a bucket.
3739//
3740// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
3741// with awserr.Error's Code and Message methods to get detailed information about
3742// the error.
3743//
3744// See the AWS API reference guide for Amazon Simple Storage Service's
3745// API operation ListObjectVersions for usage and error information.
3746// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersions
3747func (c *S3) ListObjectVersions(input *ListObjectVersionsInput) (*ListObjectVersionsOutput, error) {
3748 req, out := c.ListObjectVersionsRequest(input)
3749 return out, req.Send()
3750}
3751
3752// ListObjectVersionsWithContext is the same as ListObjectVersions with the addition of
3753// the ability to pass a context and additional request options.
3754//
3755// See ListObjectVersions for details on how to use this API operation.
3756//
3757// The context must be non-nil and will be used for request cancellation. If
3758// the context is nil a panic will occur. In the future the SDK may create
3759// sub-contexts for http.Requests. See https://golang.org/pkg/context/
3760// for more information on using Contexts.
3761func (c *S3) ListObjectVersionsWithContext(ctx aws.Context, input *ListObjectVersionsInput, opts ...request.Option) (*ListObjectVersionsOutput, error) {
3762 req, out := c.ListObjectVersionsRequest(input)
3763 req.SetContext(ctx)
3764 req.ApplyOptions(opts...)
3765 return out, req.Send()
3766}
3767
3768// ListObjectVersionsPages iterates over the pages of a ListObjectVersions operation,
3769// calling the "fn" function with the response data for each page. To stop
3770// iterating, return false from the fn function.
3771//
3772// See ListObjectVersions method for more information on how to use this operation.
3773//
3774// Note: This operation can generate multiple requests to a service.
3775//
3776// // Example iterating over at most 3 pages of a ListObjectVersions operation.
3777// pageNum := 0
3778// err := client.ListObjectVersionsPages(params,
3779// func(page *ListObjectVersionsOutput, lastPage bool) bool {
3780// pageNum++
3781// fmt.Println(page)
3782// return pageNum <= 3
3783// })
3784//
3785func (c *S3) ListObjectVersionsPages(input *ListObjectVersionsInput, fn func(*ListObjectVersionsOutput, bool) bool) error {
3786 return c.ListObjectVersionsPagesWithContext(aws.BackgroundContext(), input, fn)
3787}
3788
3789// ListObjectVersionsPagesWithContext same as ListObjectVersionsPages except
3790// it takes a Context and allows setting request options on the pages.
3791//
3792// The context must be non-nil and will be used for request cancellation. If
3793// the context is nil a panic will occur. In the future the SDK may create
3794// sub-contexts for http.Requests. See https://golang.org/pkg/context/
3795// for more information on using Contexts.
3796func (c *S3) ListObjectVersionsPagesWithContext(ctx aws.Context, input *ListObjectVersionsInput, fn func(*ListObjectVersionsOutput, bool) bool, opts ...request.Option) error {
3797 p := request.Pagination{
3798 NewRequest: func() (*request.Request, error) {
3799 var inCpy *ListObjectVersionsInput
3800 if input != nil {
3801 tmp := *input
3802 inCpy = &tmp
3803 }
3804 req, _ := c.ListObjectVersionsRequest(inCpy)
3805 req.SetContext(ctx)
3806 req.ApplyOptions(opts...)
3807 return req, nil
3808 },
3809 }
3810
3811 cont := true
3812 for p.Next() && cont {
3813 cont = fn(p.Page().(*ListObjectVersionsOutput), !p.HasNextPage())
3814 }
3815 return p.Err()
3816}
3817
3818const opListObjects = "ListObjects"
3819
3820// ListObjectsRequest generates a "aws/request.Request" representing the
3821// client's request for the ListObjects operation. The "output" return
3822// value can be used to capture response data after the request's "Send" method
3823// is called.
3824//
3825// See ListObjects for usage and error information.
3826//
3827// Use this method when you want to inject custom logic into the request's
3828// lifecycle using a custom handler, or when you want to access properties
3829// on the request object before or after sending the request. If you just
3830// want the service response, call the ListObjects method directly
3831// instead.
3832//
3833// Note: You must call the "Send" method on the returned request object in order
3834// to execute the request.
3835//
3836// // Example sending a request using the ListObjectsRequest method.
3837// req, resp := client.ListObjectsRequest(params)
3838//
3839// err := req.Send()
3840// if err == nil { // resp is now filled
3841// fmt.Println(resp)
3842// }
3843//
3844// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjects
3845func (c *S3) ListObjectsRequest(input *ListObjectsInput) (req *request.Request, output *ListObjectsOutput) {
3846 op := &request.Operation{
3847 Name: opListObjects,
3848 HTTPMethod: "GET",
3849 HTTPPath: "/{Bucket}",
3850 Paginator: &request.Paginator{
3851 InputTokens: []string{"Marker"},
3852 OutputTokens: []string{"NextMarker || Contents[-1].Key"},
3853 LimitToken: "MaxKeys",
3854 TruncationToken: "IsTruncated",
3855 },
3856 }
3857
3858 if input == nil {
3859 input = &ListObjectsInput{}
3860 }
3861
3862 output = &ListObjectsOutput{}
3863 req = c.newRequest(op, input, output)
3864 return
3865}
3866
3867// ListObjects API operation for Amazon Simple Storage Service.
3868//
3869// Returns some or all (up to 1000) of the objects in a bucket. You can use
3870// the request parameters as selection criteria to return a subset of the objects
3871// in a bucket.
3872//
3873// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
3874// with awserr.Error's Code and Message methods to get detailed information about
3875// the error.
3876//
3877// See the AWS API reference guide for Amazon Simple Storage Service's
3878// API operation ListObjects for usage and error information.
3879//
3880// Returned Error Codes:
3881// * ErrCodeNoSuchBucket "NoSuchBucket"
3882// The specified bucket does not exist.
3883//
3884// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjects
3885func (c *S3) ListObjects(input *ListObjectsInput) (*ListObjectsOutput, error) {
3886 req, out := c.ListObjectsRequest(input)
3887 return out, req.Send()
3888}
3889
3890// ListObjectsWithContext is the same as ListObjects with the addition of
3891// the ability to pass a context and additional request options.
3892//
3893// See ListObjects for details on how to use this API operation.
3894//
3895// The context must be non-nil and will be used for request cancellation. If
3896// the context is nil a panic will occur. In the future the SDK may create
3897// sub-contexts for http.Requests. See https://golang.org/pkg/context/
3898// for more information on using Contexts.
3899func (c *S3) ListObjectsWithContext(ctx aws.Context, input *ListObjectsInput, opts ...request.Option) (*ListObjectsOutput, error) {
3900 req, out := c.ListObjectsRequest(input)
3901 req.SetContext(ctx)
3902 req.ApplyOptions(opts...)
3903 return out, req.Send()
3904}
3905
// ListObjectsPages iterates over the pages of a ListObjects operation,
// calling the "fn" function with the response data for each page. To stop
// iterating, return false from the fn function.
//
// See ListObjects method for more information on how to use this operation.
//
// Note: This operation can generate multiple requests to a service.
//
//    // Example iterating over at most 3 pages of a ListObjects operation.
//    pageNum := 0
//    err := client.ListObjectsPages(params,
//        func(page *ListObjectsOutput, lastPage bool) bool {
//            pageNum++
//            fmt.Println(page)
//            return pageNum <= 3
//        })
//
func (c *S3) ListObjectsPages(input *ListObjectsInput, fn func(*ListObjectsOutput, bool) bool) error {
	return c.ListObjectsPagesWithContext(aws.BackgroundContext(), input, fn)
}

// ListObjectsPagesWithContext is the same as ListObjectsPages except
// it takes a Context and allows setting request options on the pages.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil, a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *S3) ListObjectsPagesWithContext(ctx aws.Context, input *ListObjectsInput, fn func(*ListObjectsOutput, bool) bool, opts ...request.Option) error {
	p := request.Pagination{
		NewRequest: func() (*request.Request, error) {
			var inCpy *ListObjectsInput
			if input != nil {
				tmp := *input
				inCpy = &tmp
			}
			req, _ := c.ListObjectsRequest(inCpy)
			req.SetContext(ctx)
			req.ApplyOptions(opts...)
			return req, nil
		},
	}

	cont := true
	for p.Next() && cont {
		cont = fn(p.Page().(*ListObjectsOutput), !p.HasNextPage())
	}
	return p.Err()
}

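// As a usage sketch (bucket name assumed), the pagination helper can collect
// every key in a bucket without touching Marker or IsTruncated by hand; the
// SDK advances the marker between pages:
//
//    var keys []string
//    err := client.ListObjectsPages(&s3.ListObjectsInput{
//        Bucket: aws.String("my-bucket"), // hypothetical bucket name
//    }, func(page *s3.ListObjectsOutput, lastPage bool) bool {
//        for _, obj := range page.Contents {
//            keys = append(keys, aws.StringValue(obj.Key))
//        }
//        return true // keep going until the last page
//    })
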
const opListObjectsV2 = "ListObjectsV2"

// ListObjectsV2Request generates an "aws/request.Request" representing the
// client's request for the ListObjectsV2 operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// See ListObjectsV2 for usage and error information.
//
// Create a request object using this method when you want to inject
// custom logic into the request's lifecycle using a custom handler, or if you want to
// access properties on the request object before or after sending the request. If
// you just want the service response, call the ListObjectsV2 method directly
// instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
//    // Example sending a request using the ListObjectsV2Request method.
//    req, resp := client.ListObjectsV2Request(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2
func (c *S3) ListObjectsV2Request(input *ListObjectsV2Input) (req *request.Request, output *ListObjectsV2Output) {
	op := &request.Operation{
		Name:       opListObjectsV2,
		HTTPMethod: "GET",
		HTTPPath:   "/{Bucket}?list-type=2",
		Paginator: &request.Paginator{
			InputTokens:     []string{"ContinuationToken"},
			OutputTokens:    []string{"NextContinuationToken"},
			LimitToken:      "MaxKeys",
			TruncationToken: "",
		},
	}

	if input == nil {
		input = &ListObjectsV2Input{}
	}

	output = &ListObjectsV2Output{}
	req = c.newRequest(op, input, output)
	return
}

// ListObjectsV2 API operation for Amazon Simple Storage Service.
//
// Returns some or all (up to 1000) of the objects in a bucket. You can use
// the request parameters as selection criteria to return a subset of the objects
// in a bucket. Note: ListObjectsV2 is the revised List Objects API, and we recommend
// you use this revised API for new application development.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Simple Storage Service's
// API operation ListObjectsV2 for usage and error information.
//
// Returned Error Codes:
//   * ErrCodeNoSuchBucket "NoSuchBucket"
//   The specified bucket does not exist.
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2
func (c *S3) ListObjectsV2(input *ListObjectsV2Input) (*ListObjectsV2Output, error) {
	req, out := c.ListObjectsV2Request(input)
	return out, req.Send()
}

// ListObjectsV2WithContext is the same as ListObjectsV2 with the addition of
// the ability to pass a context and additional request options.
//
// See ListObjectsV2 for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil, a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *S3) ListObjectsV2WithContext(ctx aws.Context, input *ListObjectsV2Input, opts ...request.Option) (*ListObjectsV2Output, error) {
	req, out := c.ListObjectsV2Request(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}

// ListObjectsV2Pages iterates over the pages of a ListObjectsV2 operation,
// calling the "fn" function with the response data for each page. To stop
// iterating, return false from the fn function.
//
// See ListObjectsV2 method for more information on how to use this operation.
//
// Note: This operation can generate multiple requests to a service.
//
//    // Example iterating over at most 3 pages of a ListObjectsV2 operation.
//    pageNum := 0
//    err := client.ListObjectsV2Pages(params,
//        func(page *ListObjectsV2Output, lastPage bool) bool {
//            pageNum++
//            fmt.Println(page)
//            return pageNum <= 3
//        })
//
func (c *S3) ListObjectsV2Pages(input *ListObjectsV2Input, fn func(*ListObjectsV2Output, bool) bool) error {
	return c.ListObjectsV2PagesWithContext(aws.BackgroundContext(), input, fn)
}

// ListObjectsV2PagesWithContext is the same as ListObjectsV2Pages except
// it takes a Context and allows setting request options on the pages.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil, a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *S3) ListObjectsV2PagesWithContext(ctx aws.Context, input *ListObjectsV2Input, fn func(*ListObjectsV2Output, bool) bool, opts ...request.Option) error {
	p := request.Pagination{
		NewRequest: func() (*request.Request, error) {
			var inCpy *ListObjectsV2Input
			if input != nil {
				tmp := *input
				inCpy = &tmp
			}
			req, _ := c.ListObjectsV2Request(inCpy)
			req.SetContext(ctx)
			req.ApplyOptions(opts...)
			return req, nil
		},
	}

	cont := true
	for p.Next() && cont {
		cont = fn(p.Page().(*ListObjectsV2Output), !p.HasNextPage())
	}
	return p.Err()
}

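// A brief sketch of V2 pagination (the bucket and prefix are illustrative
// assumptions); the SDK forwards NextContinuationToken between pages, so the
// callback only inspects each response:
//
//    err := client.ListObjectsV2Pages(&s3.ListObjectsV2Input{
//        Bucket: aws.String("my-bucket"),  // hypothetical bucket name
//        Prefix: aws.String("logs/2017/"), // hypothetical key prefix
//    }, func(page *s3.ListObjectsV2Output, lastPage bool) bool {
//        fmt.Println(aws.Int64Value(page.KeyCount), "keys in this page")
//        return !lastPage // stop once the final page has been handled
//    })
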
const opListParts = "ListParts"

// ListPartsRequest generates an "aws/request.Request" representing the
// client's request for the ListParts operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// See ListParts for usage and error information.
//
// Create a request object using this method when you want to inject
// custom logic into the request's lifecycle using a custom handler, or if you want to
// access properties on the request object before or after sending the request. If
// you just want the service response, call the ListParts method directly
// instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
//    // Example sending a request using the ListPartsRequest method.
//    req, resp := client.ListPartsRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListParts
func (c *S3) ListPartsRequest(input *ListPartsInput) (req *request.Request, output *ListPartsOutput) {
	op := &request.Operation{
		Name:       opListParts,
		HTTPMethod: "GET",
		HTTPPath:   "/{Bucket}/{Key+}",
		Paginator: &request.Paginator{
			InputTokens:     []string{"PartNumberMarker"},
			OutputTokens:    []string{"NextPartNumberMarker"},
			LimitToken:      "MaxParts",
			TruncationToken: "IsTruncated",
		},
	}

	if input == nil {
		input = &ListPartsInput{}
	}

	output = &ListPartsOutput{}
	req = c.newRequest(op, input, output)
	return
}

// ListParts API operation for Amazon Simple Storage Service.
//
// Lists the parts that have been uploaded for a specific multipart upload.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Simple Storage Service's
// API operation ListParts for usage and error information.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListParts
func (c *S3) ListParts(input *ListPartsInput) (*ListPartsOutput, error) {
	req, out := c.ListPartsRequest(input)
	return out, req.Send()
}

// ListPartsWithContext is the same as ListParts with the addition of
// the ability to pass a context and additional request options.
//
// See ListParts for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil, a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *S3) ListPartsWithContext(ctx aws.Context, input *ListPartsInput, opts ...request.Option) (*ListPartsOutput, error) {
	req, out := c.ListPartsRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}

// ListPartsPages iterates over the pages of a ListParts operation,
// calling the "fn" function with the response data for each page. To stop
// iterating, return false from the fn function.
//
// See ListParts method for more information on how to use this operation.
//
// Note: This operation can generate multiple requests to a service.
//
//    // Example iterating over at most 3 pages of a ListParts operation.
//    pageNum := 0
//    err := client.ListPartsPages(params,
//        func(page *ListPartsOutput, lastPage bool) bool {
//            pageNum++
//            fmt.Println(page)
//            return pageNum <= 3
//        })
//
func (c *S3) ListPartsPages(input *ListPartsInput, fn func(*ListPartsOutput, bool) bool) error {
	return c.ListPartsPagesWithContext(aws.BackgroundContext(), input, fn)
}

// ListPartsPagesWithContext is the same as ListPartsPages except
// it takes a Context and allows setting request options on the pages.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil, a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *S3) ListPartsPagesWithContext(ctx aws.Context, input *ListPartsInput, fn func(*ListPartsOutput, bool) bool, opts ...request.Option) error {
	p := request.Pagination{
		NewRequest: func() (*request.Request, error) {
			var inCpy *ListPartsInput
			if input != nil {
				tmp := *input
				inCpy = &tmp
			}
			req, _ := c.ListPartsRequest(inCpy)
			req.SetContext(ctx)
			req.ApplyOptions(opts...)
			return req, nil
		},
	}

	cont := true
	for p.Next() && cont {
		cont = fn(p.Page().(*ListPartsOutput), !p.HasNextPage())
	}
	return p.Err()
}

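// A sketch of enumerating the parts of an in-progress multipart upload
// (the bucket, key, and upload ID below are placeholders for illustration):
//
//    err := client.ListPartsPages(&s3.ListPartsInput{
//        Bucket:   aws.String("my-bucket"),       // hypothetical bucket name
//        Key:      aws.String("backups/big.bin"), // hypothetical object key
//        UploadId: aws.String(uploadID),          // from CreateMultipartUpload
//    }, func(page *s3.ListPartsOutput, lastPage bool) bool {
//        for _, part := range page.Parts {
//            fmt.Println(aws.Int64Value(part.PartNumber), aws.Int64Value(part.Size))
//        }
//        return true
//    })
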
const opPutBucketAccelerateConfiguration = "PutBucketAccelerateConfiguration"

// PutBucketAccelerateConfigurationRequest generates an "aws/request.Request" representing the
// client's request for the PutBucketAccelerateConfiguration operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// See PutBucketAccelerateConfiguration for usage and error information.
//
// Create a request object using this method when you want to inject
// custom logic into the request's lifecycle using a custom handler, or if you want to
// access properties on the request object before or after sending the request. If
// you just want the service response, call the PutBucketAccelerateConfiguration method directly
// instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
//    // Example sending a request using the PutBucketAccelerateConfigurationRequest method.
//    req, resp := client.PutBucketAccelerateConfigurationRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAccelerateConfiguration
func (c *S3) PutBucketAccelerateConfigurationRequest(input *PutBucketAccelerateConfigurationInput) (req *request.Request, output *PutBucketAccelerateConfigurationOutput) {
	op := &request.Operation{
		Name:       opPutBucketAccelerateConfiguration,
		HTTPMethod: "PUT",
		HTTPPath:   "/{Bucket}?accelerate",
	}

	if input == nil {
		input = &PutBucketAccelerateConfigurationInput{}
	}

	output = &PutBucketAccelerateConfigurationOutput{}
	req = c.newRequest(op, input, output)
	req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
	return
}

// PutBucketAccelerateConfiguration API operation for Amazon Simple Storage Service.
//
// Sets the accelerate configuration of an existing bucket.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Simple Storage Service's
// API operation PutBucketAccelerateConfiguration for usage and error information.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAccelerateConfiguration
func (c *S3) PutBucketAccelerateConfiguration(input *PutBucketAccelerateConfigurationInput) (*PutBucketAccelerateConfigurationOutput, error) {
	req, out := c.PutBucketAccelerateConfigurationRequest(input)
	return out, req.Send()
}

// PutBucketAccelerateConfigurationWithContext is the same as PutBucketAccelerateConfiguration with the addition of
// the ability to pass a context and additional request options.
//
// See PutBucketAccelerateConfiguration for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil, a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *S3) PutBucketAccelerateConfigurationWithContext(ctx aws.Context, input *PutBucketAccelerateConfigurationInput, opts ...request.Option) (*PutBucketAccelerateConfigurationOutput, error) {
	req, out := c.PutBucketAccelerateConfigurationRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}

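// A minimal sketch of enabling Transfer Acceleration (the bucket name is an
// assumption; the status constant is defined elsewhere in this package):
//
//    _, err := client.PutBucketAccelerateConfiguration(&s3.PutBucketAccelerateConfigurationInput{
//        Bucket: aws.String("my-bucket"), // hypothetical bucket name
//        AccelerateConfiguration: &s3.AccelerateConfiguration{
//            Status: aws.String(s3.BucketAccelerateStatusEnabled),
//        },
//    })
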
const opPutBucketAcl = "PutBucketAcl"

// PutBucketAclRequest generates an "aws/request.Request" representing the
// client's request for the PutBucketAcl operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// See PutBucketAcl for usage and error information.
//
// Create a request object using this method when you want to inject
// custom logic into the request's lifecycle using a custom handler, or if you want to
// access properties on the request object before or after sending the request. If
// you just want the service response, call the PutBucketAcl method directly
// instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
//    // Example sending a request using the PutBucketAclRequest method.
//    req, resp := client.PutBucketAclRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAcl
func (c *S3) PutBucketAclRequest(input *PutBucketAclInput) (req *request.Request, output *PutBucketAclOutput) {
	op := &request.Operation{
		Name:       opPutBucketAcl,
		HTTPMethod: "PUT",
		HTTPPath:   "/{Bucket}?acl",
	}

	if input == nil {
		input = &PutBucketAclInput{}
	}

	output = &PutBucketAclOutput{}
	req = c.newRequest(op, input, output)
	req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
	return
}

// PutBucketAcl API operation for Amazon Simple Storage Service.
//
// Sets the permissions on a bucket using access control lists (ACL).
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Simple Storage Service's
// API operation PutBucketAcl for usage and error information.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAcl
func (c *S3) PutBucketAcl(input *PutBucketAclInput) (*PutBucketAclOutput, error) {
	req, out := c.PutBucketAclRequest(input)
	return out, req.Send()
}

// PutBucketAclWithContext is the same as PutBucketAcl with the addition of
// the ability to pass a context and additional request options.
//
// See PutBucketAcl for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil, a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *S3) PutBucketAclWithContext(ctx aws.Context, input *PutBucketAclInput, opts ...request.Option) (*PutBucketAclOutput, error) {
	req, out := c.PutBucketAclRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}

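// As an illustrative sketch, a canned ACL can be applied without building a
// full grant list (the bucket name is an assumption; the canned-ACL constant
// is defined in this package):
//
//    _, err := client.PutBucketAcl(&s3.PutBucketAclInput{
//        Bucket: aws.String("my-bucket"), // hypothetical bucket name
//        ACL:    aws.String(s3.BucketCannedACLPrivate),
//    })
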
const opPutBucketAnalyticsConfiguration = "PutBucketAnalyticsConfiguration"

// PutBucketAnalyticsConfigurationRequest generates an "aws/request.Request" representing the
// client's request for the PutBucketAnalyticsConfiguration operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// See PutBucketAnalyticsConfiguration for usage and error information.
//
// Create a request object using this method when you want to inject
// custom logic into the request's lifecycle using a custom handler, or if you want to
// access properties on the request object before or after sending the request. If
// you just want the service response, call the PutBucketAnalyticsConfiguration method directly
// instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
//    // Example sending a request using the PutBucketAnalyticsConfigurationRequest method.
//    req, resp := client.PutBucketAnalyticsConfigurationRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAnalyticsConfiguration
func (c *S3) PutBucketAnalyticsConfigurationRequest(input *PutBucketAnalyticsConfigurationInput) (req *request.Request, output *PutBucketAnalyticsConfigurationOutput) {
	op := &request.Operation{
		Name:       opPutBucketAnalyticsConfiguration,
		HTTPMethod: "PUT",
		HTTPPath:   "/{Bucket}?analytics",
	}

	if input == nil {
		input = &PutBucketAnalyticsConfigurationInput{}
	}

	output = &PutBucketAnalyticsConfigurationOutput{}
	req = c.newRequest(op, input, output)
	req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
	return
}

// PutBucketAnalyticsConfiguration API operation for Amazon Simple Storage Service.
//
// Sets an analytics configuration for the bucket (specified by the analytics
// configuration ID).
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Simple Storage Service's
// API operation PutBucketAnalyticsConfiguration for usage and error information.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAnalyticsConfiguration
func (c *S3) PutBucketAnalyticsConfiguration(input *PutBucketAnalyticsConfigurationInput) (*PutBucketAnalyticsConfigurationOutput, error) {
	req, out := c.PutBucketAnalyticsConfigurationRequest(input)
	return out, req.Send()
}

// PutBucketAnalyticsConfigurationWithContext is the same as PutBucketAnalyticsConfiguration with the addition of
// the ability to pass a context and additional request options.
//
// See PutBucketAnalyticsConfiguration for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil, a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *S3) PutBucketAnalyticsConfigurationWithContext(ctx aws.Context, input *PutBucketAnalyticsConfigurationInput, opts ...request.Option) (*PutBucketAnalyticsConfigurationOutput, error) {
	req, out := c.PutBucketAnalyticsConfigurationRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}

const opPutBucketCors = "PutBucketCors"

// PutBucketCorsRequest generates an "aws/request.Request" representing the
// client's request for the PutBucketCors operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// See PutBucketCors for usage and error information.
//
// Create a request object using this method when you want to inject
// custom logic into the request's lifecycle using a custom handler, or if you want to
// access properties on the request object before or after sending the request. If
// you just want the service response, call the PutBucketCors method directly
// instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
//    // Example sending a request using the PutBucketCorsRequest method.
//    req, resp := client.PutBucketCorsRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketCors
func (c *S3) PutBucketCorsRequest(input *PutBucketCorsInput) (req *request.Request, output *PutBucketCorsOutput) {
	op := &request.Operation{
		Name:       opPutBucketCors,
		HTTPMethod: "PUT",
		HTTPPath:   "/{Bucket}?cors",
	}

	if input == nil {
		input = &PutBucketCorsInput{}
	}

	output = &PutBucketCorsOutput{}
	req = c.newRequest(op, input, output)
	req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
	return
}

// PutBucketCors API operation for Amazon Simple Storage Service.
//
// Sets the CORS configuration for a bucket.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Simple Storage Service's
// API operation PutBucketCors for usage and error information.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketCors
func (c *S3) PutBucketCors(input *PutBucketCorsInput) (*PutBucketCorsOutput, error) {
	req, out := c.PutBucketCorsRequest(input)
	return out, req.Send()
}

// PutBucketCorsWithContext is the same as PutBucketCors with the addition of
// the ability to pass a context and additional request options.
//
// See PutBucketCors for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil, a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *S3) PutBucketCorsWithContext(ctx aws.Context, input *PutBucketCorsInput, opts ...request.Option) (*PutBucketCorsOutput, error) {
	req, out := c.PutBucketCorsRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}

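// A short sketch of a single-rule CORS configuration (the bucket, origin, and
// methods are illustrative assumptions):
//
//    _, err := client.PutBucketCors(&s3.PutBucketCorsInput{
//        Bucket: aws.String("my-bucket"), // hypothetical bucket name
//        CORSConfiguration: &s3.CORSConfiguration{
//            CORSRules: []*s3.CORSRule{{
//                AllowedOrigins: []*string{aws.String("https://example.com")},
//                AllowedMethods: []*string{aws.String("GET"), aws.String("PUT")},
//                AllowedHeaders: []*string{aws.String("*")},
//            }},
//        },
//    })
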
const opPutBucketInventoryConfiguration = "PutBucketInventoryConfiguration"

// PutBucketInventoryConfigurationRequest generates an "aws/request.Request" representing the
// client's request for the PutBucketInventoryConfiguration operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// See PutBucketInventoryConfiguration for usage and error information.
//
// Create a request object using this method when you want to inject
// custom logic into the request's lifecycle using a custom handler, or if you want to
// access properties on the request object before or after sending the request. If
// you just want the service response, call the PutBucketInventoryConfiguration method directly
// instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
//    // Example sending a request using the PutBucketInventoryConfigurationRequest method.
//    req, resp := client.PutBucketInventoryConfigurationRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketInventoryConfiguration
func (c *S3) PutBucketInventoryConfigurationRequest(input *PutBucketInventoryConfigurationInput) (req *request.Request, output *PutBucketInventoryConfigurationOutput) {
	op := &request.Operation{
		Name:       opPutBucketInventoryConfiguration,
		HTTPMethod: "PUT",
		HTTPPath:   "/{Bucket}?inventory",
	}

	if input == nil {
		input = &PutBucketInventoryConfigurationInput{}
	}

	output = &PutBucketInventoryConfigurationOutput{}
	req = c.newRequest(op, input, output)
	req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
	return
}

// PutBucketInventoryConfiguration API operation for Amazon Simple Storage Service.
//
// Adds an inventory configuration (identified by the inventory ID) to the
// bucket.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Simple Storage Service's
// API operation PutBucketInventoryConfiguration for usage and error information.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketInventoryConfiguration
func (c *S3) PutBucketInventoryConfiguration(input *PutBucketInventoryConfigurationInput) (*PutBucketInventoryConfigurationOutput, error) {
	req, out := c.PutBucketInventoryConfigurationRequest(input)
	return out, req.Send()
}

// PutBucketInventoryConfigurationWithContext is the same as PutBucketInventoryConfiguration with the addition of
// the ability to pass a context and additional request options.
//
// See PutBucketInventoryConfiguration for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil, a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *S3) PutBucketInventoryConfigurationWithContext(ctx aws.Context, input *PutBucketInventoryConfigurationInput, opts ...request.Option) (*PutBucketInventoryConfigurationOutput, error) {
	req, out := c.PutBucketInventoryConfigurationRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}

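// A sketch of a daily CSV inventory report (the bucket names, configuration
// ID, and destination ARN are placeholders; the enum constants are defined in
// this package):
//
//    _, err := client.PutBucketInventoryConfiguration(&s3.PutBucketInventoryConfigurationInput{
//        Bucket: aws.String("my-bucket"),    // hypothetical source bucket
//        Id:     aws.String("daily-report"), // hypothetical inventory ID
//        InventoryConfiguration: &s3.InventoryConfiguration{
//            Id:                     aws.String("daily-report"),
//            IsEnabled:              aws.Bool(true),
//            IncludedObjectVersions: aws.String(s3.InventoryIncludedObjectVersionsCurrent),
//            Schedule: &s3.InventorySchedule{
//                Frequency: aws.String(s3.InventoryFrequencyDaily),
//            },
//            Destination: &s3.InventoryDestination{
//                S3BucketDestination: &s3.InventoryS3BucketDestination{
//                    Bucket: aws.String("arn:aws:s3:::inventory-dest"), // hypothetical ARN
//                    Format: aws.String(s3.InventoryFormatCsv),
//                },
//            },
//        },
//    })
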
const opPutBucketLifecycle = "PutBucketLifecycle"

// PutBucketLifecycleRequest generates an "aws/request.Request" representing the
// client's request for the PutBucketLifecycle operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// See PutBucketLifecycle for usage and error information.
//
// Create a request object using this method when you want to inject
// custom logic into the request's lifecycle using a custom handler, or if you want to
// access properties on the request object before or after sending the request. If
// you just want the service response, call the PutBucketLifecycle method directly
// instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
//    // Example sending a request using the PutBucketLifecycleRequest method.
//    req, resp := client.PutBucketLifecycleRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycle
func (c *S3) PutBucketLifecycleRequest(input *PutBucketLifecycleInput) (req *request.Request, output *PutBucketLifecycleOutput) {
	if c.Client.Config.Logger != nil {
		c.Client.Config.Logger.Log("This operation, PutBucketLifecycle, has been deprecated")
	}
	op := &request.Operation{
		Name:       opPutBucketLifecycle,
		HTTPMethod: "PUT",
		HTTPPath:   "/{Bucket}?lifecycle",
	}

	if input == nil {
		input = &PutBucketLifecycleInput{}
	}

	output = &PutBucketLifecycleOutput{}
	req = c.newRequest(op, input, output)
	req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
	return
}

// PutBucketLifecycle API operation for Amazon Simple Storage Service.
//
// Deprecated, see the PutBucketLifecycleConfiguration operation.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Simple Storage Service's
// API operation PutBucketLifecycle for usage and error information.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycle
func (c *S3) PutBucketLifecycle(input *PutBucketLifecycleInput) (*PutBucketLifecycleOutput, error) {
	req, out := c.PutBucketLifecycleRequest(input)
	return out, req.Send()
}

// PutBucketLifecycleWithContext is the same as PutBucketLifecycle with the addition of
// the ability to pass a context and additional request options.
//
// See PutBucketLifecycle for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil, a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *S3) PutBucketLifecycleWithContext(ctx aws.Context, input *PutBucketLifecycleInput, opts ...request.Option) (*PutBucketLifecycleOutput, error) {
	req, out := c.PutBucketLifecycleRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}

const opPutBucketLifecycleConfiguration = "PutBucketLifecycleConfiguration"

// PutBucketLifecycleConfigurationRequest generates an "aws/request.Request" representing the
// client's request for the PutBucketLifecycleConfiguration operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// See PutBucketLifecycleConfiguration for usage and error information.
//
// Create a request object using this method when you want to inject
// custom logic into the request's lifecycle using a custom handler, or if you want to
// access properties on the request object before or after sending the request. If
// you just want the service response, call the PutBucketLifecycleConfiguration method directly
// instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
//    // Example sending a request using the PutBucketLifecycleConfigurationRequest method.
//    req, resp := client.PutBucketLifecycleConfigurationRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfiguration
func (c *S3) PutBucketLifecycleConfigurationRequest(input *PutBucketLifecycleConfigurationInput) (req *request.Request, output *PutBucketLifecycleConfigurationOutput) {
	op := &request.Operation{
		Name:       opPutBucketLifecycleConfiguration,
		HTTPMethod: "PUT",
		HTTPPath:   "/{Bucket}?lifecycle",
	}

	if input == nil {
		input = &PutBucketLifecycleConfigurationInput{}
	}

	output = &PutBucketLifecycleConfigurationOutput{}
	req = c.newRequest(op, input, output)
	req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
	return
}

// PutBucketLifecycleConfiguration API operation for Amazon Simple Storage Service.
//
// Sets lifecycle configuration for your bucket. If a lifecycle configuration
// already exists, this operation replaces it.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Simple Storage Service's
// API operation PutBucketLifecycleConfiguration for usage and error information.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfiguration
func (c *S3) PutBucketLifecycleConfiguration(input *PutBucketLifecycleConfigurationInput) (*PutBucketLifecycleConfigurationOutput, error) {
	req, out := c.PutBucketLifecycleConfigurationRequest(input)
	return out, req.Send()
}

// PutBucketLifecycleConfigurationWithContext is the same as PutBucketLifecycleConfiguration with the addition of
// the ability to pass a context and additional request options.
//
// See PutBucketLifecycleConfiguration for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil, a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *S3) PutBucketLifecycleConfigurationWithContext(ctx aws.Context, input *PutBucketLifecycleConfigurationInput, opts ...request.Option) (*PutBucketLifecycleConfigurationOutput, error) {
	req, out := c.PutBucketLifecycleConfigurationRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}

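// A compact sketch of a lifecycle configuration that expires objects under a
// prefix after 30 days (the bucket, rule ID, and prefix are assumptions):
//
//    _, err := client.PutBucketLifecycleConfiguration(&s3.PutBucketLifecycleConfigurationInput{
//        Bucket: aws.String("my-bucket"), // hypothetical bucket name
//        LifecycleConfiguration: &s3.BucketLifecycleConfiguration{
//            Rules: []*s3.LifecycleRule{{
//                ID:         aws.String("expire-tmp"),
//                Status:     aws.String(s3.ExpirationStatusEnabled),
//                Filter:     &s3.LifecycleRuleFilter{Prefix: aws.String("tmp/")},
//                Expiration: &s3.LifecycleExpiration{Days: aws.Int64(30)},
//            }},
//        },
//    })
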
const opPutBucketLogging = "PutBucketLogging"

// PutBucketLoggingRequest generates an "aws/request.Request" representing the
// client's request for the PutBucketLogging operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// See PutBucketLogging for usage and error information.
//
// Create a request object using this method when you want to inject
// custom logic into the request's lifecycle using a custom handler, or if you want to
// access properties on the request object before or after sending the request. If
// you just want the service response, call the PutBucketLogging method directly
// instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
//    // Example sending a request using the PutBucketLoggingRequest method.
//    req, resp := client.PutBucketLoggingRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLogging
func (c *S3) PutBucketLoggingRequest(input *PutBucketLoggingInput) (req *request.Request, output *PutBucketLoggingOutput) {
	op := &request.Operation{
		Name:       opPutBucketLogging,
		HTTPMethod: "PUT",
		HTTPPath:   "/{Bucket}?logging",
	}

	if input == nil {
		input = &PutBucketLoggingInput{}
	}

	output = &PutBucketLoggingOutput{}
	req = c.newRequest(op, input, output)
	req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
	return
}

// PutBucketLogging API operation for Amazon Simple Storage Service.
//
// Sets the logging parameters for a bucket and specifies permissions for who
// can view and modify the logging parameters. To set the logging status of
// a bucket, you must be the bucket owner.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Simple Storage Service's
// API operation PutBucketLogging for usage and error information.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLogging
func (c *S3) PutBucketLogging(input *PutBucketLoggingInput) (*PutBucketLoggingOutput, error) {
	req, out := c.PutBucketLoggingRequest(input)
	return out, req.Send()
}

// PutBucketLoggingWithContext is the same as PutBucketLogging with the addition of
// the ability to pass a context and additional request options.
//
// See PutBucketLogging for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil, a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *S3) PutBucketLoggingWithContext(ctx aws.Context, input *PutBucketLoggingInput, opts ...request.Option) (*PutBucketLoggingOutput, error) {
	req, out := c.PutBucketLoggingRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}

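// A sketch of directing server access logs to a target bucket (both bucket
// names and the prefix are illustrative; the target bucket must grant the
// log-delivery group write permission):
//
//    _, err := client.PutBucketLogging(&s3.PutBucketLoggingInput{
//        Bucket: aws.String("my-bucket"), // hypothetical source bucket
//        BucketLoggingStatus: &s3.BucketLoggingStatus{
//            LoggingEnabled: &s3.LoggingEnabled{
//                TargetBucket: aws.String("my-log-bucket"), // hypothetical target
//                TargetPrefix: aws.String("access/"),
//            },
//        },
//    })
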
const opPutBucketMetricsConfiguration = "PutBucketMetricsConfiguration"

// PutBucketMetricsConfigurationRequest generates an "aws/request.Request" representing the
// client's request for the PutBucketMetricsConfiguration operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// See PutBucketMetricsConfiguration for usage and error information.
//
// Create a request object using this method when you want to inject
// custom logic into the request's lifecycle using a custom handler, or if you want to
// access properties on the request object before or after sending the request. If
// you just want the service response, call the PutBucketMetricsConfiguration method directly
// instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
//    // Example sending a request using the PutBucketMetricsConfigurationRequest method.
//    req, resp := client.PutBucketMetricsConfigurationRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketMetricsConfiguration
func (c *S3) PutBucketMetricsConfigurationRequest(input *PutBucketMetricsConfigurationInput) (req *request.Request, output *PutBucketMetricsConfigurationOutput) {
	op := &request.Operation{
		Name:       opPutBucketMetricsConfiguration,
		HTTPMethod: "PUT",
		HTTPPath:   "/{Bucket}?metrics",
	}

	if input == nil {
		input = &PutBucketMetricsConfigurationInput{}
	}

	output = &PutBucketMetricsConfigurationOutput{}
	req = c.newRequest(op, input, output)
	req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
	return
}

// PutBucketMetricsConfiguration API operation for Amazon Simple Storage Service.
//
// Sets a metrics configuration (specified by the metrics configuration ID)
// for the bucket.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Simple Storage Service's
// API operation PutBucketMetricsConfiguration for usage and error information.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketMetricsConfiguration
func (c *S3) PutBucketMetricsConfiguration(input *PutBucketMetricsConfigurationInput) (*PutBucketMetricsConfigurationOutput, error) {
	req, out := c.PutBucketMetricsConfigurationRequest(input)
	return out, req.Send()
}

// PutBucketMetricsConfigurationWithContext is the same as PutBucketMetricsConfiguration with the addition of
// the ability to pass a context and additional request options.
//
// See PutBucketMetricsConfiguration for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil, a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *S3) PutBucketMetricsConfigurationWithContext(ctx aws.Context, input *PutBucketMetricsConfigurationInput, opts ...request.Option) (*PutBucketMetricsConfigurationOutput, error) {
	req, out := c.PutBucketMetricsConfigurationRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}

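// A minimal sketch of a filtered request-metrics configuration (the bucket,
// configuration ID, and prefix are assumptions):
//
//    _, err := client.PutBucketMetricsConfiguration(&s3.PutBucketMetricsConfigurationInput{
//        Bucket: aws.String("my-bucket"),    // hypothetical bucket name
//        Id:     aws.String("docs-metrics"), // hypothetical configuration ID
//        MetricsConfiguration: &s3.MetricsConfiguration{
//            Id:     aws.String("docs-metrics"),
//            Filter: &s3.MetricsFilter{Prefix: aws.String("documents/")},
//        },
//    })
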
const opPutBucketNotification = "PutBucketNotification"

// PutBucketNotificationRequest generates an "aws/request.Request" representing the
// client's request for the PutBucketNotification operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// See PutBucketNotification for usage and error information.
//
// Create a request object using this method when you want to inject
// custom logic into the request's lifecycle using a custom handler, or if you want to
// access properties on the request object before or after sending the request. If
// you just want the service response, call the PutBucketNotification method directly
// instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
//    // Example sending a request using the PutBucketNotificationRequest method.
//    req, resp := client.PutBucketNotificationRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotification
func (c *S3) PutBucketNotificationRequest(input *PutBucketNotificationInput) (req *request.Request, output *PutBucketNotificationOutput) {
	if c.Client.Config.Logger != nil {
		c.Client.Config.Logger.Log("This operation, PutBucketNotification, has been deprecated")
	}
	op := &request.Operation{
		Name:       opPutBucketNotification,
		HTTPMethod: "PUT",
		HTTPPath:   "/{Bucket}?notification",
	}

	if input == nil {
		input = &PutBucketNotificationInput{}
	}

	output = &PutBucketNotificationOutput{}
	req = c.newRequest(op, input, output)
	req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
	return
}

// PutBucketNotification API operation for Amazon Simple Storage Service.
//
// Deprecated, see the PutBucketNotificationConfiguration operation.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Simple Storage Service's
// API operation PutBucketNotification for usage and error information.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotification
func (c *S3) PutBucketNotification(input *PutBucketNotificationInput) (*PutBucketNotificationOutput, error) {
	req, out := c.PutBucketNotificationRequest(input)
	return out, req.Send()
}

// PutBucketNotificationWithContext is the same as PutBucketNotification with the addition of
// the ability to pass a context and additional request options.
//
// See PutBucketNotification for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil, a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *S3) PutBucketNotificationWithContext(ctx aws.Context, input *PutBucketNotificationInput, opts ...request.Option) (*PutBucketNotificationOutput, error) {
	req, out := c.PutBucketNotificationRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}

const opPutBucketNotificationConfiguration = "PutBucketNotificationConfiguration"

// PutBucketNotificationConfigurationRequest generates an "aws/request.Request" representing the
// client's request for the PutBucketNotificationConfiguration operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// See PutBucketNotificationConfiguration for usage and error information.
//
// Create a request object using this method when you want to inject
// custom logic into the request's lifecycle using a custom handler, or if you want to
// access properties on the request object before or after sending the request. If
// you just want the service response, call the PutBucketNotificationConfiguration method directly
// instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
//    // Example sending a request using the PutBucketNotificationConfigurationRequest method.
//    req, resp := client.PutBucketNotificationConfigurationRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationConfiguration
func (c *S3) PutBucketNotificationConfigurationRequest(input *PutBucketNotificationConfigurationInput) (req *request.Request, output *PutBucketNotificationConfigurationOutput) {
	op := &request.Operation{
		Name:       opPutBucketNotificationConfiguration,
		HTTPMethod: "PUT",
		HTTPPath:   "/{Bucket}?notification",
	}

	if input == nil {
		input = &PutBucketNotificationConfigurationInput{}
	}

	output = &PutBucketNotificationConfigurationOutput{}
	req = c.newRequest(op, input, output)
	req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
	return
}

// PutBucketNotificationConfiguration API operation for Amazon Simple Storage Service.
//
// Enables notifications of specified events for a bucket.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Simple Storage Service's
// API operation PutBucketNotificationConfiguration for usage and error information.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationConfiguration
func (c *S3) PutBucketNotificationConfiguration(input *PutBucketNotificationConfigurationInput) (*PutBucketNotificationConfigurationOutput, error) {
	req, out := c.PutBucketNotificationConfigurationRequest(input)
	return out, req.Send()
}

// PutBucketNotificationConfigurationWithContext is the same as PutBucketNotificationConfiguration with the addition of
// the ability to pass a context and additional request options.
//
// See PutBucketNotificationConfiguration for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil, a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *S3) PutBucketNotificationConfigurationWithContext(ctx aws.Context, input *PutBucketNotificationConfigurationInput, opts ...request.Option) (*PutBucketNotificationConfigurationOutput, error) {
	req, out := c.PutBucketNotificationConfigurationRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}

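// A sketch that publishes object-created events to an SNS topic (the bucket
// and topic ARN are placeholders; the event constant is defined in this
// package):
//
//    _, err := client.PutBucketNotificationConfiguration(&s3.PutBucketNotificationConfigurationInput{
//        Bucket: aws.String("my-bucket"), // hypothetical bucket name
//        NotificationConfiguration: &s3.NotificationConfiguration{
//            TopicConfigurations: []*s3.TopicConfiguration{{
//                TopicArn: aws.String("arn:aws:sns:us-east-1:123456789012:my-topic"), // hypothetical ARN
//                Events:   []*string{aws.String(s3.EventS3ObjectCreated)},
//            }},
//        },
//    })
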
const opPutBucketPolicy = "PutBucketPolicy"

// PutBucketPolicyRequest generates an "aws/request.Request" representing the
// client's request for the PutBucketPolicy operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// See PutBucketPolicy for usage and error information.
//
// Create a request object using this method when you want to inject
// custom logic into the request's lifecycle using a custom handler, or if you want to
// access properties on the request object before or after sending the request. If
// you just want the service response, call the PutBucketPolicy method directly
// instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
//    // Example sending a request using the PutBucketPolicyRequest method.
//    req, resp := client.PutBucketPolicyRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketPolicy
func (c *S3) PutBucketPolicyRequest(input *PutBucketPolicyInput) (req *request.Request, output *PutBucketPolicyOutput) {
	op := &request.Operation{
		Name:       opPutBucketPolicy,
		HTTPMethod: "PUT",
		HTTPPath:   "/{Bucket}?policy",
	}

	if input == nil {
		input = &PutBucketPolicyInput{}
	}

	output = &PutBucketPolicyOutput{}
	req = c.newRequest(op, input, output)
	req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
	return
}

// PutBucketPolicy API operation for Amazon Simple Storage Service.
//
// Replaces a policy on a bucket. If the bucket already has a policy, the one
// in this request completely replaces it.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Simple Storage Service's
// API operation PutBucketPolicy for usage and error information.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketPolicy
func (c *S3) PutBucketPolicy(input *PutBucketPolicyInput) (*PutBucketPolicyOutput, error) {
	req, out := c.PutBucketPolicyRequest(input)
	return out, req.Send()
}

// PutBucketPolicyWithContext is the same as PutBucketPolicy with the addition of
// the ability to pass a context and additional request options.
//
// See PutBucketPolicy for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil, a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *S3) PutBucketPolicyWithContext(ctx aws.Context, input *PutBucketPolicyInput, opts ...request.Option) (*PutBucketPolicyOutput, error) {
	req, out := c.PutBucketPolicyRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}

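// A sketch attaching a minimal read-only bucket policy; the policy body is a
// plain JSON string, and every name here is illustrative (the bucket in the
// Resource ARN must match the target bucket):
//
//    policy := `{
//        "Version": "2012-10-17",
//        "Statement": [{
//            "Effect":    "Allow",
//            "Principal": "*",
//            "Action":    "s3:GetObject",
//            "Resource":  "arn:aws:s3:::my-bucket/*"
//        }]
//    }`
//    _, err := client.PutBucketPolicy(&s3.PutBucketPolicyInput{
//        Bucket: aws.String("my-bucket"), // hypothetical bucket name
//        Policy: aws.String(policy),
//    })
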
5163const opPutBucketReplication = "PutBucketReplication"
5164
5165// PutBucketReplicationRequest generates a "aws/request.Request" representing the
5166// client's request for the PutBucketReplication operation. The "output" return
5167// value can be used to capture response data after the request's "Send" method
5168// is called.
5169//
5170// See PutBucketReplication for usage and error information.
5171//
// Use this method when you want to inject custom logic into the request's
// lifecycle via a custom handler, or when you need to access properties on the
// request object before or after sending the request. If you only want the
// service response, call the PutBucketReplication method directly instead.
5177//
5178// Note: You must call the "Send" method on the returned request object in order
5179// to execute the request.
5180//
5181// // Example sending a request using the PutBucketReplicationRequest method.
5182// req, resp := client.PutBucketReplicationRequest(params)
5183//
5184// err := req.Send()
5185// if err == nil { // resp is now filled
5186// fmt.Println(resp)
5187// }
5188//
5189// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketReplication
5190func (c *S3) PutBucketReplicationRequest(input *PutBucketReplicationInput) (req *request.Request, output *PutBucketReplicationOutput) {
5191 op := &request.Operation{
5192 Name: opPutBucketReplication,
5193 HTTPMethod: "PUT",
5194 HTTPPath: "/{Bucket}?replication",
5195 }
5196
5197 if input == nil {
5198 input = &PutBucketReplicationInput{}
5199 }
5200
5201 output = &PutBucketReplicationOutput{}
5202 req = c.newRequest(op, input, output)
5203 req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
5204 req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
5205 return
5206}
5207
5208// PutBucketReplication API operation for Amazon Simple Storage Service.
5209//
5210// Creates a new replication configuration (or replaces an existing one, if
5211// present).
5212//
5213// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
5214// with awserr.Error's Code and Message methods to get detailed information about
5215// the error.
5216//
5217// See the AWS API reference guide for Amazon Simple Storage Service's
5218// API operation PutBucketReplication for usage and error information.
5219// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketReplication
5220func (c *S3) PutBucketReplication(input *PutBucketReplicationInput) (*PutBucketReplicationOutput, error) {
5221 req, out := c.PutBucketReplicationRequest(input)
5222 return out, req.Send()
5223}
5224
5225// PutBucketReplicationWithContext is the same as PutBucketReplication with the addition of
5226// the ability to pass a context and additional request options.
5227//
5228// See PutBucketReplication for details on how to use this API operation.
5229//
5230// The context must be non-nil and will be used for request cancellation. If
// the context is nil, a panic will occur. In the future the SDK may create
5232// sub-contexts for http.Requests. See https://golang.org/pkg/context/
5233// for more information on using Contexts.
5234func (c *S3) PutBucketReplicationWithContext(ctx aws.Context, input *PutBucketReplicationInput, opts ...request.Option) (*PutBucketReplicationOutput, error) {
5235 req, out := c.PutBucketReplicationRequest(input)
5236 req.SetContext(ctx)
5237 req.ApplyOptions(opts...)
5238 return out, req.Send()
5239}
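
// A minimal sketch of a replication configuration (editorial example; the
// bucket names and IAM role ARN are hypothetical). Each rule needs a Prefix,
// a Status, and a Destination bucket ARN, and the configuration as a whole
// needs an IAM role that S3 can assume:
//
//    _, err := svc.PutBucketReplication(&s3.PutBucketReplicationInput{
//        Bucket: aws.String("source-bucket"),
//        ReplicationConfiguration: &s3.ReplicationConfiguration{
//            Role: aws.String("arn:aws:iam::123456789012:role/replication-role"),
//            Rules: []*s3.ReplicationRule{{
//                Prefix: aws.String(""), // replicate every key
//                Status: aws.String(s3.ReplicationRuleStatusEnabled),
//                Destination: &s3.Destination{
//                    Bucket: aws.String("arn:aws:s3:::destination-bucket"),
//                },
//            }},
//        },
//    })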
5240
5241const opPutBucketRequestPayment = "PutBucketRequestPayment"
5242
5243// PutBucketRequestPaymentRequest generates a "aws/request.Request" representing the
5244// client's request for the PutBucketRequestPayment operation. The "output" return
5245// value can be used to capture response data after the request's "Send" method
5246// is called.
5247//
5248// See PutBucketRequestPayment for usage and error information.
5249//
// Use this method when you want to inject custom logic into the request's
// lifecycle via a custom handler, or when you need to access properties on the
// request object before or after sending the request. If you only want the
// service response, call the PutBucketRequestPayment method directly instead.
5255//
5256// Note: You must call the "Send" method on the returned request object in order
5257// to execute the request.
5258//
5259// // Example sending a request using the PutBucketRequestPaymentRequest method.
5260// req, resp := client.PutBucketRequestPaymentRequest(params)
5261//
5262// err := req.Send()
5263// if err == nil { // resp is now filled
5264// fmt.Println(resp)
5265// }
5266//
5267// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketRequestPayment
5268func (c *S3) PutBucketRequestPaymentRequest(input *PutBucketRequestPaymentInput) (req *request.Request, output *PutBucketRequestPaymentOutput) {
5269 op := &request.Operation{
5270 Name: opPutBucketRequestPayment,
5271 HTTPMethod: "PUT",
5272 HTTPPath: "/{Bucket}?requestPayment",
5273 }
5274
5275 if input == nil {
5276 input = &PutBucketRequestPaymentInput{}
5277 }
5278
5279 output = &PutBucketRequestPaymentOutput{}
5280 req = c.newRequest(op, input, output)
5281 req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
5282 req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
5283 return
5284}
5285
5286// PutBucketRequestPayment API operation for Amazon Simple Storage Service.
5287//
5288// Sets the request payment configuration for a bucket. By default, the bucket
5289// owner pays for downloads from the bucket. This configuration parameter enables
5290// the bucket owner (only) to specify that the person requesting the download
5291// will be charged for the download. Documentation on requester pays buckets
5292// can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html
5293//
5294// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
5295// with awserr.Error's Code and Message methods to get detailed information about
5296// the error.
5297//
5298// See the AWS API reference guide for Amazon Simple Storage Service's
5299// API operation PutBucketRequestPayment for usage and error information.
5300// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketRequestPayment
5301func (c *S3) PutBucketRequestPayment(input *PutBucketRequestPaymentInput) (*PutBucketRequestPaymentOutput, error) {
5302 req, out := c.PutBucketRequestPaymentRequest(input)
5303 return out, req.Send()
5304}
5305
5306// PutBucketRequestPaymentWithContext is the same as PutBucketRequestPayment with the addition of
5307// the ability to pass a context and additional request options.
5308//
5309// See PutBucketRequestPayment for details on how to use this API operation.
5310//
5311// The context must be non-nil and will be used for request cancellation. If
// the context is nil, a panic will occur. In the future the SDK may create
5313// sub-contexts for http.Requests. See https://golang.org/pkg/context/
5314// for more information on using Contexts.
5315func (c *S3) PutBucketRequestPaymentWithContext(ctx aws.Context, input *PutBucketRequestPaymentInput, opts ...request.Option) (*PutBucketRequestPaymentOutput, error) {
5316 req, out := c.PutBucketRequestPaymentRequest(input)
5317 req.SetContext(ctx)
5318 req.ApplyOptions(opts...)
5319 return out, req.Send()
5320}
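
// A sketch of enabling requester pays on a hypothetical bucket (editorial
// example). s3.PayerRequester is the "Requester" enum value; setting
// s3.PayerBucketOwner reverts to the default:
//
//    _, err := svc.PutBucketRequestPayment(&s3.PutBucketRequestPaymentInput{
//        Bucket: aws.String("my-bucket"),
//        RequestPaymentConfiguration: &s3.RequestPaymentConfiguration{
//            Payer: aws.String(s3.PayerRequester),
//        },
//    })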
5321
5322const opPutBucketTagging = "PutBucketTagging"
5323
5324// PutBucketTaggingRequest generates a "aws/request.Request" representing the
5325// client's request for the PutBucketTagging operation. The "output" return
5326// value can be used to capture response data after the request's "Send" method
5327// is called.
5328//
5329// See PutBucketTagging for usage and error information.
5330//
// Use this method when you want to inject custom logic into the request's
// lifecycle via a custom handler, or when you need to access properties on the
// request object before or after sending the request. If you only want the
// service response, call the PutBucketTagging method directly instead.
5336//
5337// Note: You must call the "Send" method on the returned request object in order
5338// to execute the request.
5339//
5340// // Example sending a request using the PutBucketTaggingRequest method.
5341// req, resp := client.PutBucketTaggingRequest(params)
5342//
5343// err := req.Send()
5344// if err == nil { // resp is now filled
5345// fmt.Println(resp)
5346// }
5347//
5348// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketTagging
5349func (c *S3) PutBucketTaggingRequest(input *PutBucketTaggingInput) (req *request.Request, output *PutBucketTaggingOutput) {
5350 op := &request.Operation{
5351 Name: opPutBucketTagging,
5352 HTTPMethod: "PUT",
5353 HTTPPath: "/{Bucket}?tagging",
5354 }
5355
5356 if input == nil {
5357 input = &PutBucketTaggingInput{}
5358 }
5359
5360 output = &PutBucketTaggingOutput{}
5361 req = c.newRequest(op, input, output)
5362 req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
5363 req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
5364 return
5365}
5366
5367// PutBucketTagging API operation for Amazon Simple Storage Service.
5368//
5369// Sets the tags for a bucket.
5370//
5371// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
5372// with awserr.Error's Code and Message methods to get detailed information about
5373// the error.
5374//
5375// See the AWS API reference guide for Amazon Simple Storage Service's
5376// API operation PutBucketTagging for usage and error information.
5377// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketTagging
5378func (c *S3) PutBucketTagging(input *PutBucketTaggingInput) (*PutBucketTaggingOutput, error) {
5379 req, out := c.PutBucketTaggingRequest(input)
5380 return out, req.Send()
5381}
5382
5383// PutBucketTaggingWithContext is the same as PutBucketTagging with the addition of
5384// the ability to pass a context and additional request options.
5385//
5386// See PutBucketTagging for details on how to use this API operation.
5387//
5388// The context must be non-nil and will be used for request cancellation. If
// the context is nil, a panic will occur. In the future the SDK may create
5390// sub-contexts for http.Requests. See https://golang.org/pkg/context/
5391// for more information on using Contexts.
5392func (c *S3) PutBucketTaggingWithContext(ctx aws.Context, input *PutBucketTaggingInput, opts ...request.Option) (*PutBucketTaggingOutput, error) {
5393 req, out := c.PutBucketTaggingRequest(input)
5394 req.SetContext(ctx)
5395 req.ApplyOptions(opts...)
5396 return out, req.Send()
5397}
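
// A sketch of replacing a bucket's tag-set (editorial example; the bucket and
// tag values are hypothetical). Note that PutBucketTagging overwrites the
// entire tag-set rather than merging with existing tags:
//
//    _, err := svc.PutBucketTagging(&s3.PutBucketTaggingInput{
//        Bucket: aws.String("my-bucket"),
//        Tagging: &s3.Tagging{TagSet: []*s3.Tag{
//            {Key: aws.String("team"), Value: aws.String("storage")},
//            {Key: aws.String("env"), Value: aws.String("prod")},
//        }},
//    })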
5398
5399const opPutBucketVersioning = "PutBucketVersioning"
5400
5401// PutBucketVersioningRequest generates a "aws/request.Request" representing the
5402// client's request for the PutBucketVersioning operation. The "output" return
5403// value can be used to capture response data after the request's "Send" method
5404// is called.
5405//
5406// See PutBucketVersioning for usage and error information.
5407//
// Use this method when you want to inject custom logic into the request's
// lifecycle via a custom handler, or when you need to access properties on the
// request object before or after sending the request. If you only want the
// service response, call the PutBucketVersioning method directly instead.
5413//
5414// Note: You must call the "Send" method on the returned request object in order
5415// to execute the request.
5416//
5417// // Example sending a request using the PutBucketVersioningRequest method.
5418// req, resp := client.PutBucketVersioningRequest(params)
5419//
5420// err := req.Send()
5421// if err == nil { // resp is now filled
5422// fmt.Println(resp)
5423// }
5424//
5425// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketVersioning
5426func (c *S3) PutBucketVersioningRequest(input *PutBucketVersioningInput) (req *request.Request, output *PutBucketVersioningOutput) {
5427 op := &request.Operation{
5428 Name: opPutBucketVersioning,
5429 HTTPMethod: "PUT",
5430 HTTPPath: "/{Bucket}?versioning",
5431 }
5432
5433 if input == nil {
5434 input = &PutBucketVersioningInput{}
5435 }
5436
5437 output = &PutBucketVersioningOutput{}
5438 req = c.newRequest(op, input, output)
5439 req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
5440 req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
5441 return
5442}
5443
5444// PutBucketVersioning API operation for Amazon Simple Storage Service.
5445//
5446// Sets the versioning state of an existing bucket. To set the versioning state,
5447// you must be the bucket owner.
5448//
5449// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
5450// with awserr.Error's Code and Message methods to get detailed information about
5451// the error.
5452//
5453// See the AWS API reference guide for Amazon Simple Storage Service's
5454// API operation PutBucketVersioning for usage and error information.
5455// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketVersioning
5456func (c *S3) PutBucketVersioning(input *PutBucketVersioningInput) (*PutBucketVersioningOutput, error) {
5457 req, out := c.PutBucketVersioningRequest(input)
5458 return out, req.Send()
5459}
5460
5461// PutBucketVersioningWithContext is the same as PutBucketVersioning with the addition of
5462// the ability to pass a context and additional request options.
5463//
5464// See PutBucketVersioning for details on how to use this API operation.
5465//
5466// The context must be non-nil and will be used for request cancellation. If
// the context is nil, a panic will occur. In the future the SDK may create
5468// sub-contexts for http.Requests. See https://golang.org/pkg/context/
5469// for more information on using Contexts.
5470func (c *S3) PutBucketVersioningWithContext(ctx aws.Context, input *PutBucketVersioningInput, opts ...request.Option) (*PutBucketVersioningOutput, error) {
5471 req, out := c.PutBucketVersioningRequest(input)
5472 req.SetContext(ctx)
5473 req.ApplyOptions(opts...)
5474 return out, req.Send()
5475}
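
// A sketch of enabling versioning on a hypothetical bucket (editorial
// example). Use s3.BucketVersioningStatusSuspended to suspend it again;
// versioning cannot be fully removed once enabled:
//
//    _, err := svc.PutBucketVersioning(&s3.PutBucketVersioningInput{
//        Bucket: aws.String("my-bucket"),
//        VersioningConfiguration: &s3.VersioningConfiguration{
//            Status: aws.String(s3.BucketVersioningStatusEnabled),
//        },
//    })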
5476
5477const opPutBucketWebsite = "PutBucketWebsite"
5478
5479// PutBucketWebsiteRequest generates a "aws/request.Request" representing the
5480// client's request for the PutBucketWebsite operation. The "output" return
5481// value can be used to capture response data after the request's "Send" method
5482// is called.
5483//
5484// See PutBucketWebsite for usage and error information.
5485//
// Use this method when you want to inject custom logic into the request's
// lifecycle via a custom handler, or when you need to access properties on the
// request object before or after sending the request. If you only want the
// service response, call the PutBucketWebsite method directly instead.
5491//
5492// Note: You must call the "Send" method on the returned request object in order
5493// to execute the request.
5494//
5495// // Example sending a request using the PutBucketWebsiteRequest method.
5496// req, resp := client.PutBucketWebsiteRequest(params)
5497//
5498// err := req.Send()
5499// if err == nil { // resp is now filled
5500// fmt.Println(resp)
5501// }
5502//
5503// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketWebsite
5504func (c *S3) PutBucketWebsiteRequest(input *PutBucketWebsiteInput) (req *request.Request, output *PutBucketWebsiteOutput) {
5505 op := &request.Operation{
5506 Name: opPutBucketWebsite,
5507 HTTPMethod: "PUT",
5508 HTTPPath: "/{Bucket}?website",
5509 }
5510
5511 if input == nil {
5512 input = &PutBucketWebsiteInput{}
5513 }
5514
5515 output = &PutBucketWebsiteOutput{}
5516 req = c.newRequest(op, input, output)
5517 req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
5518 req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
5519 return
5520}
5521
5522// PutBucketWebsite API operation for Amazon Simple Storage Service.
5523//
// Sets the website configuration for a bucket.
5525//
5526// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
5527// with awserr.Error's Code and Message methods to get detailed information about
5528// the error.
5529//
5530// See the AWS API reference guide for Amazon Simple Storage Service's
5531// API operation PutBucketWebsite for usage and error information.
5532// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketWebsite
5533func (c *S3) PutBucketWebsite(input *PutBucketWebsiteInput) (*PutBucketWebsiteOutput, error) {
5534 req, out := c.PutBucketWebsiteRequest(input)
5535 return out, req.Send()
5536}
5537
5538// PutBucketWebsiteWithContext is the same as PutBucketWebsite with the addition of
5539// the ability to pass a context and additional request options.
5540//
5541// See PutBucketWebsite for details on how to use this API operation.
5542//
5543// The context must be non-nil and will be used for request cancellation. If
// the context is nil, a panic will occur. In the future the SDK may create
5545// sub-contexts for http.Requests. See https://golang.org/pkg/context/
5546// for more information on using Contexts.
5547func (c *S3) PutBucketWebsiteWithContext(ctx aws.Context, input *PutBucketWebsiteInput, opts ...request.Option) (*PutBucketWebsiteOutput, error) {
5548 req, out := c.PutBucketWebsiteRequest(input)
5549 req.SetContext(ctx)
5550 req.ApplyOptions(opts...)
5551 return out, req.Send()
5552}
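
// A sketch of configuring static website hosting (editorial example; the
// bucket and document names are hypothetical):
//
//    _, err := svc.PutBucketWebsite(&s3.PutBucketWebsiteInput{
//        Bucket: aws.String("my-bucket"),
//        WebsiteConfiguration: &s3.WebsiteConfiguration{
//            IndexDocument: &s3.IndexDocument{Suffix: aws.String("index.html")},
//            ErrorDocument: &s3.ErrorDocument{Key: aws.String("error.html")},
//        },
//    })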
5553
5554const opPutObject = "PutObject"
5555
5556// PutObjectRequest generates a "aws/request.Request" representing the
5557// client's request for the PutObject operation. The "output" return
5558// value can be used to capture response data after the request's "Send" method
5559// is called.
5560//
5561// See PutObject for usage and error information.
5562//
// Use this method when you want to inject custom logic into the request's
// lifecycle via a custom handler, or when you need to access properties on the
// request object before or after sending the request. If you only want the
// service response, call the PutObject method directly instead.
5568//
5569// Note: You must call the "Send" method on the returned request object in order
5570// to execute the request.
5571//
5572// // Example sending a request using the PutObjectRequest method.
5573// req, resp := client.PutObjectRequest(params)
5574//
5575// err := req.Send()
5576// if err == nil { // resp is now filled
5577// fmt.Println(resp)
5578// }
5579//
5580// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObject
5581func (c *S3) PutObjectRequest(input *PutObjectInput) (req *request.Request, output *PutObjectOutput) {
5582 op := &request.Operation{
5583 Name: opPutObject,
5584 HTTPMethod: "PUT",
5585 HTTPPath: "/{Bucket}/{Key+}",
5586 }
5587
5588 if input == nil {
5589 input = &PutObjectInput{}
5590 }
5591
5592 output = &PutObjectOutput{}
5593 req = c.newRequest(op, input, output)
5594 return
5595}
5596
5597// PutObject API operation for Amazon Simple Storage Service.
5598//
5599// Adds an object to a bucket.
5600//
5601// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
5602// with awserr.Error's Code and Message methods to get detailed information about
5603// the error.
5604//
5605// See the AWS API reference guide for Amazon Simple Storage Service's
5606// API operation PutObject for usage and error information.
5607// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObject
5608func (c *S3) PutObject(input *PutObjectInput) (*PutObjectOutput, error) {
5609 req, out := c.PutObjectRequest(input)
5610 return out, req.Send()
5611}
5612
5613// PutObjectWithContext is the same as PutObject with the addition of
5614// the ability to pass a context and additional request options.
5615//
5616// See PutObject for details on how to use this API operation.
5617//
5618// The context must be non-nil and will be used for request cancellation. If
// the context is nil, a panic will occur. In the future the SDK may create
5620// sub-contexts for http.Requests. See https://golang.org/pkg/context/
5621// for more information on using Contexts.
5622func (c *S3) PutObjectWithContext(ctx aws.Context, input *PutObjectInput, opts ...request.Option) (*PutObjectOutput, error) {
5623 req, out := c.PutObjectRequest(input)
5624 req.SetContext(ctx)
5625 req.ApplyOptions(opts...)
5626 return out, req.Send()
5627}
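
// A sketch of an upload with a deadline via PutObjectWithContext (editorial
// example; assumes the standard library context, time, and bytes packages and
// a hypothetical bucket/key). The request is cancelled if it outlives the
// context:
//
//    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
//    defer cancel()
//    _, err := svc.PutObjectWithContext(ctx, &s3.PutObjectInput{
//        Bucket: aws.String("my-bucket"),
//        Key:    aws.String("greeting.txt"),
//        Body:   bytes.NewReader([]byte("hello")),
//    })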
5628
5629const opPutObjectAcl = "PutObjectAcl"
5630
5631// PutObjectAclRequest generates a "aws/request.Request" representing the
5632// client's request for the PutObjectAcl operation. The "output" return
5633// value can be used to capture response data after the request's "Send" method
5634// is called.
5635//
5636// See PutObjectAcl for usage and error information.
5637//
// Use this method when you want to inject custom logic into the request's
// lifecycle via a custom handler, or when you need to access properties on the
// request object before or after sending the request. If you only want the
// service response, call the PutObjectAcl method directly instead.
5643//
5644// Note: You must call the "Send" method on the returned request object in order
5645// to execute the request.
5646//
5647// // Example sending a request using the PutObjectAclRequest method.
5648// req, resp := client.PutObjectAclRequest(params)
5649//
5650// err := req.Send()
5651// if err == nil { // resp is now filled
5652// fmt.Println(resp)
5653// }
5654//
5655// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAcl
5656func (c *S3) PutObjectAclRequest(input *PutObjectAclInput) (req *request.Request, output *PutObjectAclOutput) {
5657 op := &request.Operation{
5658 Name: opPutObjectAcl,
5659 HTTPMethod: "PUT",
5660 HTTPPath: "/{Bucket}/{Key+}?acl",
5661 }
5662
5663 if input == nil {
5664 input = &PutObjectAclInput{}
5665 }
5666
5667 output = &PutObjectAclOutput{}
5668 req = c.newRequest(op, input, output)
5669 return
5670}
5671
5672// PutObjectAcl API operation for Amazon Simple Storage Service.
5673//
// Uses the acl subresource to set the access control list (ACL) permissions
// for an object that already exists in a bucket.
5676//
5677// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
5678// with awserr.Error's Code and Message methods to get detailed information about
5679// the error.
5680//
5681// See the AWS API reference guide for Amazon Simple Storage Service's
5682// API operation PutObjectAcl for usage and error information.
5683//
5684// Returned Error Codes:
5685// * ErrCodeNoSuchKey "NoSuchKey"
5686// The specified key does not exist.
5687//
5688// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAcl
5689func (c *S3) PutObjectAcl(input *PutObjectAclInput) (*PutObjectAclOutput, error) {
5690 req, out := c.PutObjectAclRequest(input)
5691 return out, req.Send()
5692}
5693
5694// PutObjectAclWithContext is the same as PutObjectAcl with the addition of
5695// the ability to pass a context and additional request options.
5696//
5697// See PutObjectAcl for details on how to use this API operation.
5698//
5699// The context must be non-nil and will be used for request cancellation. If
// the context is nil, a panic will occur. In the future the SDK may create
5701// sub-contexts for http.Requests. See https://golang.org/pkg/context/
5702// for more information on using Contexts.
5703func (c *S3) PutObjectAclWithContext(ctx aws.Context, input *PutObjectAclInput, opts ...request.Option) (*PutObjectAclOutput, error) {
5704 req, out := c.PutObjectAclRequest(input)
5705 req.SetContext(ctx)
5706 req.ApplyOptions(opts...)
5707 return out, req.Send()
5708}
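
// A sketch of setting a canned ACL and handling the documented NoSuchKey
// error code with awserr (editorial example; the names are hypothetical):
//
//    _, err := svc.PutObjectAcl(&s3.PutObjectAclInput{
//        Bucket: aws.String("my-bucket"),
//        Key:    aws.String("report.csv"),
//        ACL:    aws.String(s3.ObjectCannedACLPublicRead),
//    })
//    if aerr, ok := err.(awserr.Error); ok && aerr.Code() == s3.ErrCodeNoSuchKey {
//        log.Println("no such object:", aerr.Message())
//    }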
5709
5710const opPutObjectTagging = "PutObjectTagging"
5711
5712// PutObjectTaggingRequest generates a "aws/request.Request" representing the
5713// client's request for the PutObjectTagging operation. The "output" return
5714// value can be used to capture response data after the request's "Send" method
5715// is called.
5716//
5717// See PutObjectTagging for usage and error information.
5718//
// Use this method when you want to inject custom logic into the request's
// lifecycle via a custom handler, or when you need to access properties on the
// request object before or after sending the request. If you only want the
// service response, call the PutObjectTagging method directly instead.
5724//
5725// Note: You must call the "Send" method on the returned request object in order
5726// to execute the request.
5727//
5728// // Example sending a request using the PutObjectTaggingRequest method.
5729// req, resp := client.PutObjectTaggingRequest(params)
5730//
5731// err := req.Send()
5732// if err == nil { // resp is now filled
5733// fmt.Println(resp)
5734// }
5735//
5736// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTagging
5737func (c *S3) PutObjectTaggingRequest(input *PutObjectTaggingInput) (req *request.Request, output *PutObjectTaggingOutput) {
5738 op := &request.Operation{
5739 Name: opPutObjectTagging,
5740 HTTPMethod: "PUT",
5741 HTTPPath: "/{Bucket}/{Key+}?tagging",
5742 }
5743
5744 if input == nil {
5745 input = &PutObjectTaggingInput{}
5746 }
5747
5748 output = &PutObjectTaggingOutput{}
5749 req = c.newRequest(op, input, output)
5750 return
5751}
5752
5753// PutObjectTagging API operation for Amazon Simple Storage Service.
5754//
// Sets the supplied tag-set on an object that already exists in a bucket.
5756//
5757// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
5758// with awserr.Error's Code and Message methods to get detailed information about
5759// the error.
5760//
5761// See the AWS API reference guide for Amazon Simple Storage Service's
5762// API operation PutObjectTagging for usage and error information.
5763// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTagging
5764func (c *S3) PutObjectTagging(input *PutObjectTaggingInput) (*PutObjectTaggingOutput, error) {
5765 req, out := c.PutObjectTaggingRequest(input)
5766 return out, req.Send()
5767}
5768
5769// PutObjectTaggingWithContext is the same as PutObjectTagging with the addition of
5770// the ability to pass a context and additional request options.
5771//
5772// See PutObjectTagging for details on how to use this API operation.
5773//
5774// The context must be non-nil and will be used for request cancellation. If
// the context is nil, a panic will occur. In the future the SDK may create
5776// sub-contexts for http.Requests. See https://golang.org/pkg/context/
5777// for more information on using Contexts.
5778func (c *S3) PutObjectTaggingWithContext(ctx aws.Context, input *PutObjectTaggingInput, opts ...request.Option) (*PutObjectTaggingOutput, error) {
5779 req, out := c.PutObjectTaggingRequest(input)
5780 req.SetContext(ctx)
5781 req.ApplyOptions(opts...)
5782 return out, req.Send()
5783}
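
// A sketch of replacing an object's tag-set (editorial example; the names are
// hypothetical). As with bucket tagging, the supplied set replaces any
// existing tags on the object:
//
//    _, err := svc.PutObjectTagging(&s3.PutObjectTaggingInput{
//        Bucket: aws.String("my-bucket"),
//        Key:    aws.String("report.csv"),
//        Tagging: &s3.Tagging{TagSet: []*s3.Tag{
//            {Key: aws.String("classification"), Value: aws.String("internal")},
//        }},
//    })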
5784
5785const opRestoreObject = "RestoreObject"
5786
5787// RestoreObjectRequest generates a "aws/request.Request" representing the
5788// client's request for the RestoreObject operation. The "output" return
5789// value can be used to capture response data after the request's "Send" method
5790// is called.
5791//
5792// See RestoreObject for usage and error information.
5793//
// Use this method when you want to inject custom logic into the request's
// lifecycle via a custom handler, or when you need to access properties on the
// request object before or after sending the request. If you only want the
// service response, call the RestoreObject method directly instead.
5799//
5800// Note: You must call the "Send" method on the returned request object in order
5801// to execute the request.
5802//
5803// // Example sending a request using the RestoreObjectRequest method.
5804// req, resp := client.RestoreObjectRequest(params)
5805//
5806// err := req.Send()
5807// if err == nil { // resp is now filled
5808// fmt.Println(resp)
5809// }
5810//
5811// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObject
5812func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Request, output *RestoreObjectOutput) {
5813 op := &request.Operation{
5814 Name: opRestoreObject,
5815 HTTPMethod: "POST",
5816 HTTPPath: "/{Bucket}/{Key+}?restore",
5817 }
5818
5819 if input == nil {
5820 input = &RestoreObjectInput{}
5821 }
5822
5823 output = &RestoreObjectOutput{}
5824 req = c.newRequest(op, input, output)
5825 return
5826}
5827
5828// RestoreObject API operation for Amazon Simple Storage Service.
5829//
// Restores an archived copy of an object back into Amazon S3.
5831//
5832// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
5833// with awserr.Error's Code and Message methods to get detailed information about
5834// the error.
5835//
5836// See the AWS API reference guide for Amazon Simple Storage Service's
5837// API operation RestoreObject for usage and error information.
5838//
5839// Returned Error Codes:
5840// * ErrCodeObjectAlreadyInActiveTierError "ObjectAlreadyInActiveTierError"
// This operation is not allowed against this storage tier.
5842//
5843// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObject
5844func (c *S3) RestoreObject(input *RestoreObjectInput) (*RestoreObjectOutput, error) {
5845 req, out := c.RestoreObjectRequest(input)
5846 return out, req.Send()
5847}
5848
5849// RestoreObjectWithContext is the same as RestoreObject with the addition of
5850// the ability to pass a context and additional request options.
5851//
5852// See RestoreObject for details on how to use this API operation.
5853//
5854// The context must be non-nil and will be used for request cancellation. If
// the context is nil, a panic will occur. In the future the SDK may create
5856// sub-contexts for http.Requests. See https://golang.org/pkg/context/
5857// for more information on using Contexts.
5858func (c *S3) RestoreObjectWithContext(ctx aws.Context, input *RestoreObjectInput, opts ...request.Option) (*RestoreObjectOutput, error) {
5859 req, out := c.RestoreObjectRequest(input)
5860 req.SetContext(ctx)
5861 req.ApplyOptions(opts...)
5862 return out, req.Send()
5863}
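
// A sketch of a restore request with handling for the documented
// ObjectAlreadyInActiveTierError code (editorial example; the names and the
// restore window are hypothetical):
//
//    _, err := svc.RestoreObject(&s3.RestoreObjectInput{
//        Bucket:         aws.String("my-bucket"),
//        Key:            aws.String("archive/2016.tar"),
//        RestoreRequest: &s3.RestoreRequest{Days: aws.Int64(7)},
//    })
//    if aerr, ok := err.(awserr.Error); ok &&
//        aerr.Code() == s3.ErrCodeObjectAlreadyInActiveTierError {
//        // already in the active tier; nothing to do
//    }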
5864
5865const opUploadPart = "UploadPart"
5866
5867// UploadPartRequest generates a "aws/request.Request" representing the
5868// client's request for the UploadPart operation. The "output" return
5869// value can be used to capture response data after the request's "Send" method
5870// is called.
5871//
5872// See UploadPart for usage and error information.
5873//
// Use this method when you want to inject custom logic into the request's
// lifecycle via a custom handler, or when you need to access properties on the
// request object before or after sending the request. If you only want the
// service response, call the UploadPart method directly instead.
5879//
5880// Note: You must call the "Send" method on the returned request object in order
5881// to execute the request.
5882//
5883// // Example sending a request using the UploadPartRequest method.
5884// req, resp := client.UploadPartRequest(params)
5885//
5886// err := req.Send()
5887// if err == nil { // resp is now filled
5888// fmt.Println(resp)
5889// }
5890//
5891// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPart
5892func (c *S3) UploadPartRequest(input *UploadPartInput) (req *request.Request, output *UploadPartOutput) {
5893 op := &request.Operation{
5894 Name: opUploadPart,
5895 HTTPMethod: "PUT",
5896 HTTPPath: "/{Bucket}/{Key+}",
5897 }
5898
5899 if input == nil {
5900 input = &UploadPartInput{}
5901 }
5902
5903 output = &UploadPartOutput{}
5904 req = c.newRequest(op, input, output)
5905 return
5906}
5907
5908// UploadPart API operation for Amazon Simple Storage Service.
5909//
5910// Uploads a part in a multipart upload.
5911//
// Note: After you initiate a multipart upload and upload one or more parts,
// you must either complete or abort the upload in order to stop being charged
// for storage of the uploaded parts. Only after you complete or abort the
// upload does Amazon S3 free the parts storage and stop charging you for it.
5917//
5918// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
5919// with awserr.Error's Code and Message methods to get detailed information about
5920// the error.
5921//
5922// See the AWS API reference guide for Amazon Simple Storage Service's
5923// API operation UploadPart for usage and error information.
5924// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPart
5925func (c *S3) UploadPart(input *UploadPartInput) (*UploadPartOutput, error) {
5926 req, out := c.UploadPartRequest(input)
5927 return out, req.Send()
5928}
5929
5930// UploadPartWithContext is the same as UploadPart with the addition of
5931// the ability to pass a context and additional request options.
5932//
5933// See UploadPart for details on how to use this API operation.
5934//
5935// The context must be non-nil and will be used for request cancellation. If
// the context is nil, a panic will occur. In the future the SDK may create
5937// sub-contexts for http.Requests. See https://golang.org/pkg/context/
5938// for more information on using Contexts.
5939func (c *S3) UploadPartWithContext(ctx aws.Context, input *UploadPartInput, opts ...request.Option) (*UploadPartOutput, error) {
5940 req, out := c.UploadPartRequest(input)
5941 req.SetContext(ctx)
5942 req.ApplyOptions(opts...)
5943 return out, req.Send()
5944}
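
// A sketch of the surrounding multipart flow (editorial example; the names
// and the chunk variable are hypothetical, and error handling is elided for
// brevity). Aborting on failure matters because, per the note above, S3 keeps
// charging for stored parts until the upload is completed or aborted:
//
//    mp, _ := svc.CreateMultipartUpload(&s3.CreateMultipartUploadInput{
//        Bucket: aws.String("my-bucket"), Key: aws.String("big.bin"),
//    })
//    part, err := svc.UploadPart(&s3.UploadPartInput{
//        Bucket:     aws.String("my-bucket"),
//        Key:        aws.String("big.bin"),
//        UploadId:   mp.UploadId,
//        PartNumber: aws.Int64(1),
//        Body:       bytes.NewReader(chunk),
//    })
//    if err != nil {
//        svc.AbortMultipartUpload(&s3.AbortMultipartUploadInput{
//            Bucket:   aws.String("my-bucket"),
//            Key:      aws.String("big.bin"),
//            UploadId: mp.UploadId,
//        })
//    }
//    // part.ETag and the part number later feed CompleteMultipartUpload.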
5945
5946const opUploadPartCopy = "UploadPartCopy"
5947
5948// UploadPartCopyRequest generates a "aws/request.Request" representing the
5949// client's request for the UploadPartCopy operation. The "output" return
5950// value can be used to capture response data after the request's "Send" method
5951// is called.
5952//
5953// See UploadPartCopy for usage and error information.
5954//
// Use this method when you want to inject custom logic into the request's
// lifecycle via a custom handler, or when you need to access properties on the
// request object before or after sending the request. If you only want the
// service response, call the UploadPartCopy method directly instead.
5960//
5961// Note: You must call the "Send" method on the returned request object in order
5962// to execute the request.
5963//
5964// // Example sending a request using the UploadPartCopyRequest method.
5965// req, resp := client.UploadPartCopyRequest(params)
5966//
5967// err := req.Send()
5968// if err == nil { // resp is now filled
5969// fmt.Println(resp)
5970// }
5971//
5972// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopy
5973func (c *S3) UploadPartCopyRequest(input *UploadPartCopyInput) (req *request.Request, output *UploadPartCopyOutput) {
5974 op := &request.Operation{
5975 Name: opUploadPartCopy,
5976 HTTPMethod: "PUT",
5977 HTTPPath: "/{Bucket}/{Key+}",
5978 }
5979
5980 if input == nil {
5981 input = &UploadPartCopyInput{}
5982 }
5983
5984 output = &UploadPartCopyOutput{}
5985 req = c.newRequest(op, input, output)
5986 return
5987}
5988
5989// UploadPartCopy API operation for Amazon Simple Storage Service.
5990//
// Uploads a part by copying data from an existing object as the data source.
5992//
5993// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
5994// with awserr.Error's Code and Message methods to get detailed information about
5995// the error.
5996//
5997// See the AWS API reference guide for Amazon Simple Storage Service's
5998// API operation UploadPartCopy for usage and error information.
5999// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopy
6000func (c *S3) UploadPartCopy(input *UploadPartCopyInput) (*UploadPartCopyOutput, error) {
6001 req, out := c.UploadPartCopyRequest(input)
6002 return out, req.Send()
6003}
6004
6005// UploadPartCopyWithContext is the same as UploadPartCopy with the addition of
6006// the ability to pass a context and additional request options.
6007//
6008// See UploadPartCopy for details on how to use this API operation.
6009//
6010// The context must be non-nil and will be used for request cancellation. If
// the context is nil, a panic will occur. In the future the SDK may create
6012// sub-contexts for http.Requests. See https://golang.org/pkg/context/
6013// for more information on using Contexts.
6014func (c *S3) UploadPartCopyWithContext(ctx aws.Context, input *UploadPartCopyInput, opts ...request.Option) (*UploadPartCopyOutput, error) {
6015 req, out := c.UploadPartCopyRequest(input)
6016 req.SetContext(ctx)
6017 req.ApplyOptions(opts...)
6018 return out, req.Send()
6019}
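
// A sketch of copying a part from an existing object (editorial example; the
// names and the uploadID variable are hypothetical). CopySource takes the
// URL-encoded "bucket/key" of the source object:
//
//    _, err := svc.UploadPartCopy(&s3.UploadPartCopyInput{
//        Bucket:     aws.String("my-bucket"),
//        Key:        aws.String("big.bin"),
//        UploadId:   uploadID, // from CreateMultipartUpload
//        PartNumber: aws.Int64(2),
//        CopySource: aws.String("source-bucket/existing-object"),
//    })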
6020
// Specifies the number of days after the initiation of an incomplete multipart
// upload that Lifecycle will wait before permanently removing all parts of the
// upload.
6023// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortIncompleteMultipartUpload
6024type AbortIncompleteMultipartUpload struct {
6025 _ struct{} `type:"structure"`
6026
6027 // Indicates the number of days that must pass since initiation for Lifecycle
6028 // to abort an Incomplete Multipart Upload.
6029 DaysAfterInitiation *int64 `type:"integer"`
6030}
6031
6032// String returns the string representation
6033func (s AbortIncompleteMultipartUpload) String() string {
6034 return awsutil.Prettify(s)
6035}
6036
6037// GoString returns the string representation
6038func (s AbortIncompleteMultipartUpload) GoString() string {
6039 return s.String()
6040}
6041
6042// SetDaysAfterInitiation sets the DaysAfterInitiation field's value.
6043func (s *AbortIncompleteMultipartUpload) SetDaysAfterInitiation(v int64) *AbortIncompleteMultipartUpload {
6044 s.DaysAfterInitiation = &v
6045 return s
6046}
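
// A sketch of using this type inside a lifecycle rule (editorial example; the
// rule values are hypothetical). s3.ExpirationStatusEnabled is the "Enabled"
// enum value for a rule's Status:
//
//    rule := &s3.LifecycleRule{
//        Prefix: aws.String(""), // apply to all keys
//        Status: aws.String(s3.ExpirationStatusEnabled),
//        AbortIncompleteMultipartUpload: &s3.AbortIncompleteMultipartUpload{
//            DaysAfterInitiation: aws.Int64(7),
//        },
//    }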
6047
6048// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortMultipartUploadRequest
6049type AbortMultipartUploadInput struct {
6050 _ struct{} `type:"structure"`
6051
6052 // Bucket is a required field
6053 Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
6054
6055 // Key is a required field
6056 Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
6057
6058 // Confirms that the requester knows that she or he will be charged for the
6059 // request. Bucket owners need not specify this parameter in their requests.
6060 // Documentation on downloading objects from requester pays buckets can be found
6061 // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
6062 RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
6063
6064 // UploadId is a required field
6065 UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"`
6066}
6067
6068// String returns the string representation
6069func (s AbortMultipartUploadInput) String() string {
6070 return awsutil.Prettify(s)
6071}
6072
6073// GoString returns the string representation
6074func (s AbortMultipartUploadInput) GoString() string {
6075 return s.String()
6076}
6077
6078// Validate inspects the fields of the type to determine if they are valid.
6079func (s *AbortMultipartUploadInput) Validate() error {
6080 invalidParams := request.ErrInvalidParams{Context: "AbortMultipartUploadInput"}
6081 if s.Bucket == nil {
6082 invalidParams.Add(request.NewErrParamRequired("Bucket"))
6083 }
6084 if s.Key == nil {
6085 invalidParams.Add(request.NewErrParamRequired("Key"))
6086 }
6087 if s.Key != nil && len(*s.Key) < 1 {
6088 invalidParams.Add(request.NewErrParamMinLen("Key", 1))
6089 }
6090 if s.UploadId == nil {
6091 invalidParams.Add(request.NewErrParamRequired("UploadId"))
6092 }
6093
6094 if invalidParams.Len() > 0 {
6095 return invalidParams
6096 }
6097 return nil
6098}
6099
6100// SetBucket sets the Bucket field's value.
6101func (s *AbortMultipartUploadInput) SetBucket(v string) *AbortMultipartUploadInput {
6102 s.Bucket = &v
6103 return s
6104}
6105
6106// SetKey sets the Key field's value.
6107func (s *AbortMultipartUploadInput) SetKey(v string) *AbortMultipartUploadInput {
6108 s.Key = &v
6109 return s
6110}
6111
6112// SetRequestPayer sets the RequestPayer field's value.
6113func (s *AbortMultipartUploadInput) SetRequestPayer(v string) *AbortMultipartUploadInput {
6114 s.RequestPayer = &v
6115 return s
6116}
6117
6118// SetUploadId sets the UploadId field's value.
6119func (s *AbortMultipartUploadInput) SetUploadId(v string) *AbortMultipartUploadInput {
6120 s.UploadId = &v
6121 return s
6122}
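
// A sketch of building the input with the fluent setters instead of a struct
// literal (editorial example; the values are hypothetical). Each setter
// returns the receiver, so calls chain, and Validate reports missing required
// fields before the request is sent:
//
//    input := new(s3.AbortMultipartUploadInput).
//        SetBucket("my-bucket").
//        SetKey("big.bin").
//        SetUploadId(uploadID)
//    if err := input.Validate(); err != nil {
//        log.Println("invalid input:", err)
//    }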
6123
6124// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortMultipartUploadOutput
6125type AbortMultipartUploadOutput struct {
6126 _ struct{} `type:"structure"`
6127
6128 // If present, indicates that the requester was successfully charged for the
6129 // request.
6130 RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
6131}
6132
6133// String returns the string representation
6134func (s AbortMultipartUploadOutput) String() string {
6135 return awsutil.Prettify(s)
6136}
6137
6138// GoString returns the string representation
6139func (s AbortMultipartUploadOutput) GoString() string {
6140 return s.String()
6141}
6142
6143// SetRequestCharged sets the RequestCharged field's value.
6144func (s *AbortMultipartUploadOutput) SetRequestCharged(v string) *AbortMultipartUploadOutput {
6145 s.RequestCharged = &v
6146 return s
6147}
6148
6149// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AccelerateConfiguration
6150type AccelerateConfiguration struct {
6151 _ struct{} `type:"structure"`
6152
6153 // The accelerate configuration of the bucket.
6154 Status *string `type:"string" enum:"BucketAccelerateStatus"`
6155}
6156
6157// String returns the string representation
6158func (s AccelerateConfiguration) String() string {
6159 return awsutil.Prettify(s)
6160}
6161
6162// GoString returns the string representation
6163func (s AccelerateConfiguration) GoString() string {
6164 return s.String()
6165}
6166
6167// SetStatus sets the Status field's value.
6168func (s *AccelerateConfiguration) SetStatus(v string) *AccelerateConfiguration {
6169 s.Status = &v
6170 return s
6171}
6172
6173// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AccessControlPolicy
6174type AccessControlPolicy struct {
6175 _ struct{} `type:"structure"`
6176
6177 // A list of grants.
6178 Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"`
6179
6180 Owner *Owner `type:"structure"`
6181}
6182
6183// String returns the string representation
6184func (s AccessControlPolicy) String() string {
6185 return awsutil.Prettify(s)
6186}
6187
6188// GoString returns the string representation
6189func (s AccessControlPolicy) GoString() string {
6190 return s.String()
6191}
6192
6193// Validate inspects the fields of the type to determine if they are valid.
6194func (s *AccessControlPolicy) Validate() error {
6195 invalidParams := request.ErrInvalidParams{Context: "AccessControlPolicy"}
6196 if s.Grants != nil {
6197 for i, v := range s.Grants {
6198 if v == nil {
6199 continue
6200 }
6201 if err := v.Validate(); err != nil {
6202 invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Grants", i), err.(request.ErrInvalidParams))
6203 }
6204 }
6205 }
6206
6207 if invalidParams.Len() > 0 {
6208 return invalidParams
6209 }
6210 return nil
6211}
6212
6213// SetGrants sets the Grants field's value.
6214func (s *AccessControlPolicy) SetGrants(v []*Grant) *AccessControlPolicy {
6215 s.Grants = v
6216 return s
6217}
6218
6219// SetOwner sets the Owner field's value.
6220func (s *AccessControlPolicy) SetOwner(v *Owner) *AccessControlPolicy {
6221 s.Owner = v
6222 return s
6223}
6224
6225// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AnalyticsAndOperator
6226type AnalyticsAndOperator struct {
6227 _ struct{} `type:"structure"`
6228
6229 // The prefix to use when evaluating an AND predicate.
6230 Prefix *string `type:"string"`
6231
6232 // The list of tags to use when evaluating an AND predicate.
6233 Tags []*Tag `locationName:"Tag" locationNameList:"Tag" type:"list" flattened:"true"`
6234}
6235
6236// String returns the string representation
6237func (s AnalyticsAndOperator) String() string {
6238 return awsutil.Prettify(s)
6239}
6240
6241// GoString returns the string representation
6242func (s AnalyticsAndOperator) GoString() string {
6243 return s.String()
6244}
6245
6246// Validate inspects the fields of the type to determine if they are valid.
6247func (s *AnalyticsAndOperator) Validate() error {
6248 invalidParams := request.ErrInvalidParams{Context: "AnalyticsAndOperator"}
6249 if s.Tags != nil {
6250 for i, v := range s.Tags {
6251 if v == nil {
6252 continue
6253 }
6254 if err := v.Validate(); err != nil {
6255 invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams))
6256 }
6257 }
6258 }
6259
6260 if invalidParams.Len() > 0 {
6261 return invalidParams
6262 }
6263 return nil
6264}
6265
6266// SetPrefix sets the Prefix field's value.
6267func (s *AnalyticsAndOperator) SetPrefix(v string) *AnalyticsAndOperator {
6268 s.Prefix = &v
6269 return s
6270}
6271
6272// SetTags sets the Tags field's value.
6273func (s *AnalyticsAndOperator) SetTags(v []*Tag) *AnalyticsAndOperator {
6274 s.Tags = v
6275 return s
6276}
6277
6278// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AnalyticsConfiguration
6279type AnalyticsConfiguration struct {
6280 _ struct{} `type:"structure"`
6281
	// The filter used to describe the set of objects to be analyzed. A filter
	// must have exactly one prefix, one tag, or one conjunction
	// (AnalyticsAndOperator). If no filter is provided, all objects are
	// considered in any analysis.
6285 Filter *AnalyticsFilter `type:"structure"`
6286
6287 // The identifier used to represent an analytics configuration.
6288 //
6289 // Id is a required field
6290 Id *string `type:"string" required:"true"`
6291
6292 // If present, it indicates that data related to access patterns will be collected
6293 // and made available to analyze the tradeoffs between different storage classes.
6294 //
6295 // StorageClassAnalysis is a required field
6296 StorageClassAnalysis *StorageClassAnalysis `type:"structure" required:"true"`
6297}
6298
6299// String returns the string representation
6300func (s AnalyticsConfiguration) String() string {
6301 return awsutil.Prettify(s)
6302}
6303
6304// GoString returns the string representation
6305func (s AnalyticsConfiguration) GoString() string {
6306 return s.String()
6307}
6308
6309// Validate inspects the fields of the type to determine if they are valid.
6310func (s *AnalyticsConfiguration) Validate() error {
6311 invalidParams := request.ErrInvalidParams{Context: "AnalyticsConfiguration"}
6312 if s.Id == nil {
6313 invalidParams.Add(request.NewErrParamRequired("Id"))
6314 }
6315 if s.StorageClassAnalysis == nil {
6316 invalidParams.Add(request.NewErrParamRequired("StorageClassAnalysis"))
6317 }
6318 if s.Filter != nil {
6319 if err := s.Filter.Validate(); err != nil {
6320 invalidParams.AddNested("Filter", err.(request.ErrInvalidParams))
6321 }
6322 }
6323 if s.StorageClassAnalysis != nil {
6324 if err := s.StorageClassAnalysis.Validate(); err != nil {
6325 invalidParams.AddNested("StorageClassAnalysis", err.(request.ErrInvalidParams))
6326 }
6327 }
6328
6329 if invalidParams.Len() > 0 {
6330 return invalidParams
6331 }
6332 return nil
6333}
6334
6335// SetFilter sets the Filter field's value.
6336func (s *AnalyticsConfiguration) SetFilter(v *AnalyticsFilter) *AnalyticsConfiguration {
6337 s.Filter = v
6338 return s
6339}
6340
6341// SetId sets the Id field's value.
6342func (s *AnalyticsConfiguration) SetId(v string) *AnalyticsConfiguration {
6343 s.Id = &v
6344 return s
6345}
6346
6347// SetStorageClassAnalysis sets the StorageClassAnalysis field's value.
6348func (s *AnalyticsConfiguration) SetStorageClassAnalysis(v *StorageClassAnalysis) *AnalyticsConfiguration {
6349 s.StorageClassAnalysis = v
6350 return s
6351}
6352
6353// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AnalyticsExportDestination
6354type AnalyticsExportDestination struct {
6355 _ struct{} `type:"structure"`
6356
6357 // A destination signifying output to an S3 bucket.
6358 //
6359 // S3BucketDestination is a required field
6360 S3BucketDestination *AnalyticsS3BucketDestination `type:"structure" required:"true"`
6361}
6362
6363// String returns the string representation
6364func (s AnalyticsExportDestination) String() string {
6365 return awsutil.Prettify(s)
6366}
6367
6368// GoString returns the string representation
6369func (s AnalyticsExportDestination) GoString() string {
6370 return s.String()
6371}
6372
6373// Validate inspects the fields of the type to determine if they are valid.
6374func (s *AnalyticsExportDestination) Validate() error {
6375 invalidParams := request.ErrInvalidParams{Context: "AnalyticsExportDestination"}
6376 if s.S3BucketDestination == nil {
6377 invalidParams.Add(request.NewErrParamRequired("S3BucketDestination"))
6378 }
6379 if s.S3BucketDestination != nil {
6380 if err := s.S3BucketDestination.Validate(); err != nil {
6381 invalidParams.AddNested("S3BucketDestination", err.(request.ErrInvalidParams))
6382 }
6383 }
6384
6385 if invalidParams.Len() > 0 {
6386 return invalidParams
6387 }
6388 return nil
6389}
6390
6391// SetS3BucketDestination sets the S3BucketDestination field's value.
6392func (s *AnalyticsExportDestination) SetS3BucketDestination(v *AnalyticsS3BucketDestination) *AnalyticsExportDestination {
6393 s.S3BucketDestination = v
6394 return s
6395}
6396
6397// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AnalyticsFilter
6398type AnalyticsFilter struct {
6399 _ struct{} `type:"structure"`
6400
6401 // A conjunction (logical AND) of predicates, which is used in evaluating an
6402 // analytics filter. The operator must have at least two predicates.
6403 And *AnalyticsAndOperator `type:"structure"`
6404
6405 // The prefix to use when evaluating an analytics filter.
6406 Prefix *string `type:"string"`
6407
6408 // The tag to use when evaluating an analytics filter.
6409 Tag *Tag `type:"structure"`
6410}
6411
6412// String returns the string representation
6413func (s AnalyticsFilter) String() string {
6414 return awsutil.Prettify(s)
6415}
6416
6417// GoString returns the string representation
6418func (s AnalyticsFilter) GoString() string {
6419 return s.String()
6420}
6421
6422// Validate inspects the fields of the type to determine if they are valid.
6423func (s *AnalyticsFilter) Validate() error {
6424 invalidParams := request.ErrInvalidParams{Context: "AnalyticsFilter"}
6425 if s.And != nil {
6426 if err := s.And.Validate(); err != nil {
6427 invalidParams.AddNested("And", err.(request.ErrInvalidParams))
6428 }
6429 }
6430 if s.Tag != nil {
6431 if err := s.Tag.Validate(); err != nil {
6432 invalidParams.AddNested("Tag", err.(request.ErrInvalidParams))
6433 }
6434 }
6435
6436 if invalidParams.Len() > 0 {
6437 return invalidParams
6438 }
6439 return nil
6440}
6441
6442// SetAnd sets the And field's value.
6443func (s *AnalyticsFilter) SetAnd(v *AnalyticsAndOperator) *AnalyticsFilter {
6444 s.And = v
6445 return s
6446}
6447
6448// SetPrefix sets the Prefix field's value.
6449func (s *AnalyticsFilter) SetPrefix(v string) *AnalyticsFilter {
6450 s.Prefix = &v
6451 return s
6452}
6453
6454// SetTag sets the Tag field's value.
6455func (s *AnalyticsFilter) SetTag(v *Tag) *AnalyticsFilter {
6456 s.Tag = v
6457 return s
6458}
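
// A sketch of the filter shapes described above (editorial example; the
// values are hypothetical). A single predicate is set directly, while a
// conjunction is required as soon as more than one predicate is combined:
//
//    byPrefix := &s3.AnalyticsFilter{Prefix: aws.String("logs/")}
//    byBoth := &s3.AnalyticsFilter{And: &s3.AnalyticsAndOperator{
//        Prefix: aws.String("logs/"),
//        Tags:   []*s3.Tag{{Key: aws.String("env"), Value: aws.String("prod")}},
//    }}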
6459
6460// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AnalyticsS3BucketDestination
6461type AnalyticsS3BucketDestination struct {
6462 _ struct{} `type:"structure"`
6463
	// The Amazon Resource Name (ARN) of the bucket to which data is exported.
6465 //
6466 // Bucket is a required field
6467 Bucket *string `type:"string" required:"true"`
6468
6469 // The account ID that owns the destination bucket. If no account ID is provided,
6470 // the owner will not be validated prior to exporting data.
6471 BucketAccountId *string `type:"string"`
6472
6473 // The file format used when exporting data to Amazon S3.
6474 //
6475 // Format is a required field
6476 Format *string `type:"string" required:"true" enum:"AnalyticsS3ExportFileFormat"`
6477
6478 // The prefix to use when exporting data. The exported data begins with this
6479 // prefix.
6480 Prefix *string `type:"string"`
6481}
6482
6483// String returns the string representation
6484func (s AnalyticsS3BucketDestination) String() string {
6485 return awsutil.Prettify(s)
6486}
6487
6488// GoString returns the string representation
6489func (s AnalyticsS3BucketDestination) GoString() string {
6490 return s.String()
6491}
6492
6493// Validate inspects the fields of the type to determine if they are valid.
6494func (s *AnalyticsS3BucketDestination) Validate() error {
6495 invalidParams := request.ErrInvalidParams{Context: "AnalyticsS3BucketDestination"}
6496 if s.Bucket == nil {
6497 invalidParams.Add(request.NewErrParamRequired("Bucket"))
6498 }
6499 if s.Format == nil {
6500 invalidParams.Add(request.NewErrParamRequired("Format"))
6501 }
6502
6503 if invalidParams.Len() > 0 {
6504 return invalidParams
6505 }
6506 return nil
6507}
6508
6509// SetBucket sets the Bucket field's value.
6510func (s *AnalyticsS3BucketDestination) SetBucket(v string) *AnalyticsS3BucketDestination {
6511 s.Bucket = &v
6512 return s
6513}
6514
6515// SetBucketAccountId sets the BucketAccountId field's value.
6516func (s *AnalyticsS3BucketDestination) SetBucketAccountId(v string) *AnalyticsS3BucketDestination {
6517 s.BucketAccountId = &v
6518 return s
6519}
6520
6521// SetFormat sets the Format field's value.
6522func (s *AnalyticsS3BucketDestination) SetFormat(v string) *AnalyticsS3BucketDestination {
6523 s.Format = &v
6524 return s
6525}
6526
6527// SetPrefix sets the Prefix field's value.
6528func (s *AnalyticsS3BucketDestination) SetPrefix(v string) *AnalyticsS3BucketDestination {
6529 s.Prefix = &v
6530 return s
6531}
6532
6533// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Bucket
6534type Bucket struct {
6535 _ struct{} `type:"structure"`
6536
6537 // Date the bucket was created.
6538 CreationDate *time.Time `type:"timestamp" timestampFormat:"iso8601"`
6539
6540 // The name of the bucket.
6541 Name *string `type:"string"`
6542}
6543
6544// String returns the string representation
6545func (s Bucket) String() string {
6546 return awsutil.Prettify(s)
6547}
6548
6549// GoString returns the string representation
6550func (s Bucket) GoString() string {
6551 return s.String()
6552}
6553
6554// SetCreationDate sets the CreationDate field's value.
6555func (s *Bucket) SetCreationDate(v time.Time) *Bucket {
6556 s.CreationDate = &v
6557 return s
6558}
6559
6560// SetName sets the Name field's value.
6561func (s *Bucket) SetName(v string) *Bucket {
6562 s.Name = &v
6563 return s
6564}
6565
6566// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/BucketLifecycleConfiguration
6567type BucketLifecycleConfiguration struct {
6568 _ struct{} `type:"structure"`
6569
6570 // Rules is a required field
6571 Rules []*LifecycleRule `locationName:"Rule" type:"list" flattened:"true" required:"true"`
6572}
6573
6574// String returns the string representation
6575func (s BucketLifecycleConfiguration) String() string {
6576 return awsutil.Prettify(s)
6577}
6578
6579// GoString returns the string representation
6580func (s BucketLifecycleConfiguration) GoString() string {
6581 return s.String()
6582}
6583
6584// Validate inspects the fields of the type to determine if they are valid.
6585func (s *BucketLifecycleConfiguration) Validate() error {
6586 invalidParams := request.ErrInvalidParams{Context: "BucketLifecycleConfiguration"}
6587 if s.Rules == nil {
6588 invalidParams.Add(request.NewErrParamRequired("Rules"))
6589 }
6590 if s.Rules != nil {
6591 for i, v := range s.Rules {
6592 if v == nil {
6593 continue
6594 }
6595 if err := v.Validate(); err != nil {
6596 invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams))
6597 }
6598 }
6599 }
6600
6601 if invalidParams.Len() > 0 {
6602 return invalidParams
6603 }
6604 return nil
6605}
6606
6607// SetRules sets the Rules field's value.
6608func (s *BucketLifecycleConfiguration) SetRules(v []*LifecycleRule) *BucketLifecycleConfiguration {
6609 s.Rules = v
6610 return s
6611}
6612
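// Illustrative sketch (not part of the generated code): how nested validation
// surfaces rule errors. The empty LifecycleRule is deliberately invalid, so
// Validate reports its missing required fields under the "Rules[0]" context
// built by AddNested above.
func lifecycleValidationDemo() {
	cfg := (&BucketLifecycleConfiguration{}).SetRules([]*LifecycleRule{{}})
	if err := cfg.Validate(); err != nil {
		fmt.Println(err) // e.g. lists Rules[0].<field> parameter errors
	}
}
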
6613// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/BucketLoggingStatus
6614type BucketLoggingStatus struct {
6615 _ struct{} `type:"structure"`
6616
6617 LoggingEnabled *LoggingEnabled `type:"structure"`
6618}
6619
6620// String returns the string representation
6621func (s BucketLoggingStatus) String() string {
6622 return awsutil.Prettify(s)
6623}
6624
6625// GoString returns the string representation
6626func (s BucketLoggingStatus) GoString() string {
6627 return s.String()
6628}
6629
6630// Validate inspects the fields of the type to determine if they are valid.
6631func (s *BucketLoggingStatus) Validate() error {
6632 invalidParams := request.ErrInvalidParams{Context: "BucketLoggingStatus"}
6633 if s.LoggingEnabled != nil {
6634 if err := s.LoggingEnabled.Validate(); err != nil {
6635 invalidParams.AddNested("LoggingEnabled", err.(request.ErrInvalidParams))
6636 }
6637 }
6638
6639 if invalidParams.Len() > 0 {
6640 return invalidParams
6641 }
6642 return nil
6643}
6644
6645// SetLoggingEnabled sets the LoggingEnabled field's value.
6646func (s *BucketLoggingStatus) SetLoggingEnabled(v *LoggingEnabled) *BucketLoggingStatus {
6647 s.LoggingEnabled = v
6648 return s
6649}
6650
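// Illustrative sketch (not part of the generated code): enabling access
// logging. LoggingEnabled is defined elsewhere in this file; the target
// bucket and prefix values are assumptions for this example.
func loggingStatusDemo() error {
	status := (&BucketLoggingStatus{}).SetLoggingEnabled(
		(&LoggingEnabled{}).
			SetTargetBucket("example-log-bucket").
			SetTargetPrefix("access-logs/"))
	// Validate recurses into LoggingEnabled and reports any nested failures.
	return status.Validate()
}
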
6651// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CORSConfiguration
6652type CORSConfiguration struct {
6653 _ struct{} `type:"structure"`
6654
6655 // CORSRules is a required field
6656 CORSRules []*CORSRule `locationName:"CORSRule" type:"list" flattened:"true" required:"true"`
6657}
6658
6659// String returns the string representation
6660func (s CORSConfiguration) String() string {
6661 return awsutil.Prettify(s)
6662}
6663
6664// GoString returns the string representation
6665func (s CORSConfiguration) GoString() string {
6666 return s.String()
6667}
6668
6669// Validate inspects the fields of the type to determine if they are valid.
6670func (s *CORSConfiguration) Validate() error {
6671 invalidParams := request.ErrInvalidParams{Context: "CORSConfiguration"}
6672 if s.CORSRules == nil {
6673 invalidParams.Add(request.NewErrParamRequired("CORSRules"))
6674 }
6675 if s.CORSRules != nil {
6676 for i, v := range s.CORSRules {
6677 if v == nil {
6678 continue
6679 }
6680 if err := v.Validate(); err != nil {
6681 invalidParams.AddNested(fmt.Sprintf("%s[%v]", "CORSRules", i), err.(request.ErrInvalidParams))
6682 }
6683 }
6684 }
6685
6686 if invalidParams.Len() > 0 {
6687 return invalidParams
6688 }
6689 return nil
6690}
6691
6692// SetCORSRules sets the CORSRules field's value.
6693func (s *CORSConfiguration) SetCORSRules(v []*CORSRule) *CORSConfiguration {
6694 s.CORSRules = v
6695 return s
6696}
6697
6698// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CORSRule
6699type CORSRule struct {
6700 _ struct{} `type:"structure"`
6701
6702 // Specifies which headers are allowed in a pre-flight OPTIONS request.
6703 AllowedHeaders []*string `locationName:"AllowedHeader" type:"list" flattened:"true"`
6704
6705 // Identifies HTTP methods that the domain/origin specified in the rule is allowed
6706 // to execute.
6707 //
6708 // AllowedMethods is a required field
6709 AllowedMethods []*string `locationName:"AllowedMethod" type:"list" flattened:"true" required:"true"`
6710
6711 // One or more origins you want customers to be able to access the bucket from.
6712 //
6713 // AllowedOrigins is a required field
6714 AllowedOrigins []*string `locationName:"AllowedOrigin" type:"list" flattened:"true" required:"true"`
6715
6716 // One or more headers in the response that you want customers to be able to
6717 // access from their applications (for example, from a JavaScript XMLHttpRequest
6718 // object).
6719 ExposeHeaders []*string `locationName:"ExposeHeader" type:"list" flattened:"true"`
6720
6721 // The time in seconds that your browser caches the preflight response
6722 // for the specified resource.
6723 MaxAgeSeconds *int64 `type:"integer"`
6724}
6725
6726// String returns the string representation
6727func (s CORSRule) String() string {
6728 return awsutil.Prettify(s)
6729}
6730
6731// GoString returns the string representation
6732func (s CORSRule) GoString() string {
6733 return s.String()
6734}
6735
6736// Validate inspects the fields of the type to determine if they are valid.
6737func (s *CORSRule) Validate() error {
6738 invalidParams := request.ErrInvalidParams{Context: "CORSRule"}
6739 if s.AllowedMethods == nil {
6740 invalidParams.Add(request.NewErrParamRequired("AllowedMethods"))
6741 }
6742 if s.AllowedOrigins == nil {
6743 invalidParams.Add(request.NewErrParamRequired("AllowedOrigins"))
6744 }
6745
6746 if invalidParams.Len() > 0 {
6747 return invalidParams
6748 }
6749 return nil
6750}
6751
6752// SetAllowedHeaders sets the AllowedHeaders field's value.
6753func (s *CORSRule) SetAllowedHeaders(v []*string) *CORSRule {
6754 s.AllowedHeaders = v
6755 return s
6756}
6757
6758// SetAllowedMethods sets the AllowedMethods field's value.
6759func (s *CORSRule) SetAllowedMethods(v []*string) *CORSRule {
6760 s.AllowedMethods = v
6761 return s
6762}
6763
6764// SetAllowedOrigins sets the AllowedOrigins field's value.
6765func (s *CORSRule) SetAllowedOrigins(v []*string) *CORSRule {
6766 s.AllowedOrigins = v
6767 return s
6768}
6769
6770// SetExposeHeaders sets the ExposeHeaders field's value.
6771func (s *CORSRule) SetExposeHeaders(v []*string) *CORSRule {
6772 s.ExposeHeaders = v
6773 return s
6774}
6775
6776// SetMaxAgeSeconds sets the MaxAgeSeconds field's value.
6777func (s *CORSRule) SetMaxAgeSeconds(v int64) *CORSRule {
6778 s.MaxAgeSeconds = &v
6779 return s
6780}
6781
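// Illustrative sketch (not part of the generated code): assembling a CORS
// configuration from one rule. The origin and method values are assumptions;
// AllowedMethods and AllowedOrigins are the rule's two required fields.
func corsConfigDemo() error {
	rule := (&CORSRule{}).
		SetAllowedMethods([]*string{aws.String("GET")}).
		SetAllowedOrigins([]*string{aws.String("https://example.com")}).
		SetMaxAgeSeconds(3000) // browsers may cache the preflight response for 3000s
	return (&CORSConfiguration{}).SetCORSRules([]*CORSRule{rule}).Validate()
}
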
6782// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CloudFunctionConfiguration
6783type CloudFunctionConfiguration struct {
6784 _ struct{} `type:"structure"`
6785
6786 CloudFunction *string `type:"string"`
6787
6788 // Bucket event for which to send notifications.
6789 Event *string `deprecated:"true" type:"string" enum:"Event"`
6790
6791 Events []*string `locationName:"Event" type:"list" flattened:"true"`
6792
6793 // Optional unique identifier for configurations in a notification configuration.
6794 // If you don't provide one, Amazon S3 will assign an ID.
6795 Id *string `type:"string"`
6796
6797 InvocationRole *string `type:"string"`
6798}
6799
6800// String returns the string representation
6801func (s CloudFunctionConfiguration) String() string {
6802 return awsutil.Prettify(s)
6803}
6804
6805// GoString returns the string representation
6806func (s CloudFunctionConfiguration) GoString() string {
6807 return s.String()
6808}
6809
6810// SetCloudFunction sets the CloudFunction field's value.
6811func (s *CloudFunctionConfiguration) SetCloudFunction(v string) *CloudFunctionConfiguration {
6812 s.CloudFunction = &v
6813 return s
6814}
6815
6816// SetEvent sets the Event field's value.
6817func (s *CloudFunctionConfiguration) SetEvent(v string) *CloudFunctionConfiguration {
6818 s.Event = &v
6819 return s
6820}
6821
6822// SetEvents sets the Events field's value.
6823func (s *CloudFunctionConfiguration) SetEvents(v []*string) *CloudFunctionConfiguration {
6824 s.Events = v
6825 return s
6826}
6827
6828// SetId sets the Id field's value.
6829func (s *CloudFunctionConfiguration) SetId(v string) *CloudFunctionConfiguration {
6830 s.Id = &v
6831 return s
6832}
6833
6834// SetInvocationRole sets the InvocationRole field's value.
6835func (s *CloudFunctionConfiguration) SetInvocationRole(v string) *CloudFunctionConfiguration {
6836 s.InvocationRole = &v
6837 return s
6838}
6839
6840// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CommonPrefix
6841type CommonPrefix struct {
6842 _ struct{} `type:"structure"`
6843
6844 Prefix *string `type:"string"`
6845}
6846
6847// String returns the string representation
6848func (s CommonPrefix) String() string {
6849 return awsutil.Prettify(s)
6850}
6851
6852// GoString returns the string representation
6853func (s CommonPrefix) GoString() string {
6854 return s.String()
6855}
6856
6857// SetPrefix sets the Prefix field's value.
6858func (s *CommonPrefix) SetPrefix(v string) *CommonPrefix {
6859 s.Prefix = &v
6860 return s
6861}
6862
6863// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompleteMultipartUploadRequest
6864type CompleteMultipartUploadInput struct {
6865 _ struct{} `type:"structure" payload:"MultipartUpload"`
6866
6867 // Bucket is a required field
6868 Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
6869
6870 // Key is a required field
6871 Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
6872
6873 MultipartUpload *CompletedMultipartUpload `locationName:"CompleteMultipartUpload" type:"structure"`
6874
6875 // Confirms that the requester knows that they will be charged for the
6876 // request. Bucket owners need not specify this parameter in their requests.
6877 // Documentation on downloading objects from requester pays buckets can be found
6878 // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
6879 RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
6880
6881 // UploadId is a required field
6882 UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"`
6883}
6884
6885// String returns the string representation
6886func (s CompleteMultipartUploadInput) String() string {
6887 return awsutil.Prettify(s)
6888}
6889
6890// GoString returns the string representation
6891func (s CompleteMultipartUploadInput) GoString() string {
6892 return s.String()
6893}
6894
6895// Validate inspects the fields of the type to determine if they are valid.
6896func (s *CompleteMultipartUploadInput) Validate() error {
6897 invalidParams := request.ErrInvalidParams{Context: "CompleteMultipartUploadInput"}
6898 if s.Bucket == nil {
6899 invalidParams.Add(request.NewErrParamRequired("Bucket"))
6900 }
6901 if s.Key == nil {
6902 invalidParams.Add(request.NewErrParamRequired("Key"))
6903 }
6904 if s.Key != nil && len(*s.Key) < 1 {
6905 invalidParams.Add(request.NewErrParamMinLen("Key", 1))
6906 }
6907 if s.UploadId == nil {
6908 invalidParams.Add(request.NewErrParamRequired("UploadId"))
6909 }
6910
6911 if invalidParams.Len() > 0 {
6912 return invalidParams
6913 }
6914 return nil
6915}
6916
6917// SetBucket sets the Bucket field's value.
6918func (s *CompleteMultipartUploadInput) SetBucket(v string) *CompleteMultipartUploadInput {
6919 s.Bucket = &v
6920 return s
6921}
6922
6923// SetKey sets the Key field's value.
6924func (s *CompleteMultipartUploadInput) SetKey(v string) *CompleteMultipartUploadInput {
6925 s.Key = &v
6926 return s
6927}
6928
6929// SetMultipartUpload sets the MultipartUpload field's value.
6930func (s *CompleteMultipartUploadInput) SetMultipartUpload(v *CompletedMultipartUpload) *CompleteMultipartUploadInput {
6931 s.MultipartUpload = v
6932 return s
6933}
6934
6935// SetRequestPayer sets the RequestPayer field's value.
6936func (s *CompleteMultipartUploadInput) SetRequestPayer(v string) *CompleteMultipartUploadInput {
6937 s.RequestPayer = &v
6938 return s
6939}
6940
6941// SetUploadId sets the UploadId field's value.
6942func (s *CompleteMultipartUploadInput) SetUploadId(v string) *CompleteMultipartUploadInput {
6943 s.UploadId = &v
6944 return s
6945}
6946
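// Illustrative sketch (not part of the generated code): the final request of
// a multipart upload. The bucket, key, and uploadID values are assumptions;
// uploadID comes from the earlier CreateMultipartUpload response, and the
// upload payload is built from part ETags (see the CompletedPart sketch
// further down).
func completeUploadInput(bucket, key, uploadID string, upload *CompletedMultipartUpload) (*CompleteMultipartUploadInput, error) {
	in := (&CompleteMultipartUploadInput{}).
		SetBucket(bucket).
		SetKey(key).
		SetUploadId(uploadID).
		SetMultipartUpload(upload)
	// Validate enforces the required Bucket, Key (minimum length 1), and UploadId.
	if err := in.Validate(); err != nil {
		return nil, err
	}
	return in, nil
}
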
6947// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompleteMultipartUploadOutput
6948type CompleteMultipartUploadOutput struct {
6949 _ struct{} `type:"structure"`
6950
6951 Bucket *string `type:"string"`
6952
6953 // Entity tag of the object.
6954 ETag *string `type:"string"`
6955
6956 // If the object expiration is configured, this will contain the expiration
6957 // date (expiry-date) and rule ID (rule-id). The value of rule-id is URL encoded.
6958 Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"`
6959
6960 Key *string `min:"1" type:"string"`
6961
6962 Location *string `type:"string"`
6963
6964 // If present, indicates that the requester was successfully charged for the
6965 // request.
6966 RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
6967
6968 // If present, specifies the ID of the AWS Key Management Service (KMS) master
6969 // encryption key that was used for the object.
6970 SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
6971
6972 // The Server-side encryption algorithm used when storing this object in S3
6973 // (e.g., AES256, aws:kms).
6974 ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
6975
6976 // Version of the object.
6977 VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"`
6978}
6979
6980// String returns the string representation
6981func (s CompleteMultipartUploadOutput) String() string {
6982 return awsutil.Prettify(s)
6983}
6984
6985// GoString returns the string representation
6986func (s CompleteMultipartUploadOutput) GoString() string {
6987 return s.String()
6988}
6989
6990// SetBucket sets the Bucket field's value.
6991func (s *CompleteMultipartUploadOutput) SetBucket(v string) *CompleteMultipartUploadOutput {
6992 s.Bucket = &v
6993 return s
6994}
6995
6996// SetETag sets the ETag field's value.
6997func (s *CompleteMultipartUploadOutput) SetETag(v string) *CompleteMultipartUploadOutput {
6998 s.ETag = &v
6999 return s
7000}
7001
7002// SetExpiration sets the Expiration field's value.
7003func (s *CompleteMultipartUploadOutput) SetExpiration(v string) *CompleteMultipartUploadOutput {
7004 s.Expiration = &v
7005 return s
7006}
7007
7008// SetKey sets the Key field's value.
7009func (s *CompleteMultipartUploadOutput) SetKey(v string) *CompleteMultipartUploadOutput {
7010 s.Key = &v
7011 return s
7012}
7013
7014// SetLocation sets the Location field's value.
7015func (s *CompleteMultipartUploadOutput) SetLocation(v string) *CompleteMultipartUploadOutput {
7016 s.Location = &v
7017 return s
7018}
7019
7020// SetRequestCharged sets the RequestCharged field's value.
7021func (s *CompleteMultipartUploadOutput) SetRequestCharged(v string) *CompleteMultipartUploadOutput {
7022 s.RequestCharged = &v
7023 return s
7024}
7025
7026// SetSSEKMSKeyId sets the SSEKMSKeyId field's value.
7027func (s *CompleteMultipartUploadOutput) SetSSEKMSKeyId(v string) *CompleteMultipartUploadOutput {
7028 s.SSEKMSKeyId = &v
7029 return s
7030}
7031
7032// SetServerSideEncryption sets the ServerSideEncryption field's value.
7033func (s *CompleteMultipartUploadOutput) SetServerSideEncryption(v string) *CompleteMultipartUploadOutput {
7034 s.ServerSideEncryption = &v
7035 return s
7036}
7037
7038// SetVersionId sets the VersionId field's value.
7039func (s *CompleteMultipartUploadOutput) SetVersionId(v string) *CompleteMultipartUploadOutput {
7040 s.VersionId = &v
7041 return s
7042}
7043
7044// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompletedMultipartUpload
7045type CompletedMultipartUpload struct {
7046 _ struct{} `type:"structure"`
7047
7048 Parts []*CompletedPart `locationName:"Part" type:"list" flattened:"true"`
7049}
7050
7051// String returns the string representation
7052func (s CompletedMultipartUpload) String() string {
7053 return awsutil.Prettify(s)
7054}
7055
7056// GoString returns the string representation
7057func (s CompletedMultipartUpload) GoString() string {
7058 return s.String()
7059}
7060
7061// SetParts sets the Parts field's value.
7062func (s *CompletedMultipartUpload) SetParts(v []*CompletedPart) *CompletedMultipartUpload {
7063 s.Parts = v
7064 return s
7065}
7066
7067// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompletedPart
7068type CompletedPart struct {
7069 _ struct{} `type:"structure"`
7070
7071 // Entity tag returned when the part was uploaded.
7072 ETag *string `type:"string"`
7073
7074 // Part number that identifies the part. This is a positive integer between
7075 // 1 and 10,000.
7076 PartNumber *int64 `type:"integer"`
7077}
7078
7079// String returns the string representation
7080func (s CompletedPart) String() string {
7081 return awsutil.Prettify(s)
7082}
7083
7084// GoString returns the string representation
7085func (s CompletedPart) GoString() string {
7086 return s.String()
7087}
7088
7089// SetETag sets the ETag field's value.
7090func (s *CompletedPart) SetETag(v string) *CompletedPart {
7091 s.ETag = &v
7092 return s
7093}
7094
7095// SetPartNumber sets the PartNumber field's value.
7096func (s *CompletedPart) SetPartNumber(v int64) *CompletedPart {
7097 s.PartNumber = &v
7098 return s
7099}
7100
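// Illustrative sketch (not part of the generated code): collecting part ETags
// into the CompletedMultipartUpload payload used above. The etags slice is an
// assumption; each entry would come from the matching UploadPart response.
func buildCompletedUpload(etags []string) *CompletedMultipartUpload {
	parts := make([]*CompletedPart, len(etags))
	for i, tag := range etags {
		parts[i] = (&CompletedPart{}).
			SetETag(tag).
			SetPartNumber(int64(i + 1)) // part numbers are 1-based, up to 10,000
	}
	return (&CompletedMultipartUpload{}).SetParts(parts)
}
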
7101// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Condition
7102type Condition struct {
7103 _ struct{} `type:"structure"`
7104
7105 // The HTTP error code when the redirect is applied. In the event of an error,
7106 // if the error code equals this value, then the specified redirect is applied.
7107 // Required when parent element Condition is specified and sibling KeyPrefixEquals
7108 // is not specified. If both are specified, then both must be true for the redirect
7109 // to be applied.
7110 HttpErrorCodeReturnedEquals *string `type:"string"`
7111
7112 // The object key name prefix when the redirect is applied. For example, to
7113 // redirect requests for ExamplePage.html, the key prefix will be ExamplePage.html.
7114 // To redirect requests for all pages with the prefix docs/, the key prefix will
7115 // be docs/, which identifies all objects in the docs/ folder. Required when
7116 // the parent element Condition is specified and sibling HttpErrorCodeReturnedEquals
7117 // is not specified. If both conditions are specified, both must be true for
7118 // the redirect to be applied.
7119 KeyPrefixEquals *string `type:"string"`
7120}
7121
7122// String returns the string representation
7123func (s Condition) String() string {
7124 return awsutil.Prettify(s)
7125}
7126
7127// GoString returns the string representation
7128func (s Condition) GoString() string {
7129 return s.String()
7130}
7131
7132// SetHttpErrorCodeReturnedEquals sets the HttpErrorCodeReturnedEquals field's value.
7133func (s *Condition) SetHttpErrorCodeReturnedEquals(v string) *Condition {
7134 s.HttpErrorCodeReturnedEquals = &v
7135 return s
7136}
7137
7138// SetKeyPrefixEquals sets the KeyPrefixEquals field's value.
7139func (s *Condition) SetKeyPrefixEquals(v string) *Condition {
7140 s.KeyPrefixEquals = &v
7141 return s
7142}
7143
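// Illustrative sketch (not part of the generated code): a routing condition
// matching requests under docs/. In a full website configuration this
// Condition would sit inside a RoutingRule alongside a Redirect, both defined
// elsewhere in this file.
var docsCondition = (&Condition{}).SetKeyPrefixEquals("docs/")
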
7144// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObjectRequest
7145type CopyObjectInput struct {
7146 _ struct{} `type:"structure"`
7147
7148 // The canned ACL to apply to the object.
7149 ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"`
7150
7151 // Bucket is a required field
7152 Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
7153
7154 // Specifies caching behavior along the request/reply chain.
7155 CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"`
7156
7157 // Specifies presentational information for the object.
7158 ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"`
7159
7160 // Specifies what content encodings have been applied to the object and thus
7161 // what decoding mechanisms must be applied to obtain the media-type referenced
7162 // by the Content-Type header field.
7163 ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"`
7164
7165 // The language the content is in.
7166 ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"`
7167
7168 // A standard MIME type describing the format of the object data.
7169 ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
7170
7171 // The name of the source bucket and key name of the source object, separated
7172 // by a slash (/). Must be URL-encoded.
7173 //
7174 // CopySource is a required field
7175 CopySource *string `location:"header" locationName:"x-amz-copy-source" type:"string" required:"true"`
7176
7177 // Copies the object if its entity tag (ETag) matches the specified tag.
7178 CopySourceIfMatch *string `location:"header" locationName:"x-amz-copy-source-if-match" type:"string"`
7179
7180 // Copies the object if it has been modified since the specified time.
7181 CopySourceIfModifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-modified-since" type:"timestamp" timestampFormat:"rfc822"`
7182
7183 // Copies the object if its entity tag (ETag) is different than the specified
7184 // ETag.
7185 CopySourceIfNoneMatch *string `location:"header" locationName:"x-amz-copy-source-if-none-match" type:"string"`
7186
7187 // Copies the object if it hasn't been modified since the specified time.
7188 CopySourceIfUnmodifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-unmodified-since" type:"timestamp" timestampFormat:"rfc822"`
7189
7190 // Specifies the algorithm to use when decrypting the source object (e.g., AES256).
7191 CopySourceSSECustomerAlgorithm *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-algorithm" type:"string"`
7192
7193 // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt
7194 // the source object. The encryption key provided in this header must be one
7195 // that was used when the source object was created.
7196 CopySourceSSECustomerKey *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string"`
7197
7198 // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
7199 // Amazon S3 uses this header for a message integrity check to ensure the encryption
7200 // key was transmitted without error.
7201 CopySourceSSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key-MD5" type:"string"`
7202
7203 // The date and time at which the object is no longer cacheable.
7204 Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp" timestampFormat:"rfc822"`
7205
7206 // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.
7207 GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`
7208
7209 // Allows grantee to read the object data and its metadata.
7210 GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"`
7211
7212 // Allows grantee to read the object ACL.
7213 GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"`
7214
7215 // Allows grantee to write the ACL for the applicable object.
7216 GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
7217
7218 // Key is a required field
7219 Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
7220
7221 // A map of metadata to store with the object in S3.
7222 Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
7223
7224 // Specifies whether the metadata is copied from the source object or replaced
7225 // with metadata provided in the request.
7226 MetadataDirective *string `location:"header" locationName:"x-amz-metadata-directive" type:"string" enum:"MetadataDirective"`
7227
7228 // Confirms that the requester knows that they will be charged for the
7229 // request. Bucket owners need not specify this parameter in their requests.
7230 // Documentation on downloading objects from requester pays buckets can be found
7231 // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
7232 RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
7233
7234 // Specifies the algorithm to use when encrypting the object (e.g., AES256).
7235 SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
7236
7237 // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
7238 // data. This value is used to store the object and then it is discarded; Amazon
7239 // does not store the encryption key. The key must be appropriate for use with
7240 // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
7241 // header.
7242 SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"`
7243
7244 // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
7245 // Amazon S3 uses this header for a message integrity check to ensure the encryption
7246 // key was transmitted without error.
7247 SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
7248
7249 // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT
7250 // requests for an object protected by AWS KMS will fail if not made via SSL
7251 // or using SigV4. Documentation on configuring any of the officially supported
7252 // AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version
7253 SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
7254
7255 // The Server-side encryption algorithm used when storing this object in S3
7256 // (e.g., AES256, aws:kms).
7257 ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
7258
7259 // The type of storage to use for the object. Defaults to 'STANDARD'.
7260 StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"`
7261
7262 // The tag-set for the destination object; this value must be used in
7263 // conjunction with the TaggingDirective. The tag-set must be encoded as URL
7264 // query parameters.
7265 Tagging *string `location:"header" locationName:"x-amz-tagging" type:"string"`
7266
7267 // Specifies whether the object tag-set is copied from the source object or
7268 // replaced with the tag-set provided in the request.
7269 TaggingDirective *string `location:"header" locationName:"x-amz-tagging-directive" type:"string" enum:"TaggingDirective"`
7270
7271 // If the bucket is configured as a website, redirects requests for this object
7272 // to another object in the same bucket or to an external URL. Amazon S3 stores
7273 // the value of this header in the object metadata.
7274 WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"`
7275}
7276
7277// String returns the string representation
7278func (s CopyObjectInput) String() string {
7279 return awsutil.Prettify(s)
7280}
7281
7282// GoString returns the string representation
7283func (s CopyObjectInput) GoString() string {
7284 return s.String()
7285}
7286
7287// Validate inspects the fields of the type to determine if they are valid.
7288func (s *CopyObjectInput) Validate() error {
7289 invalidParams := request.ErrInvalidParams{Context: "CopyObjectInput"}
7290 if s.Bucket == nil {
7291 invalidParams.Add(request.NewErrParamRequired("Bucket"))
7292 }
7293 if s.CopySource == nil {
7294 invalidParams.Add(request.NewErrParamRequired("CopySource"))
7295 }
7296 if s.Key == nil {
7297 invalidParams.Add(request.NewErrParamRequired("Key"))
7298 }
7299 if s.Key != nil && len(*s.Key) < 1 {
7300 invalidParams.Add(request.NewErrParamMinLen("Key", 1))
7301 }
7302
7303 if invalidParams.Len() > 0 {
7304 return invalidParams
7305 }
7306 return nil
7307}
7308
7309// SetACL sets the ACL field's value.
7310func (s *CopyObjectInput) SetACL(v string) *CopyObjectInput {
7311 s.ACL = &v
7312 return s
7313}
7314
7315// SetBucket sets the Bucket field's value.
7316func (s *CopyObjectInput) SetBucket(v string) *CopyObjectInput {
7317 s.Bucket = &v
7318 return s
7319}
7320
7321// SetCacheControl sets the CacheControl field's value.
7322func (s *CopyObjectInput) SetCacheControl(v string) *CopyObjectInput {
7323 s.CacheControl = &v
7324 return s
7325}
7326
7327// SetContentDisposition sets the ContentDisposition field's value.
7328func (s *CopyObjectInput) SetContentDisposition(v string) *CopyObjectInput {
7329 s.ContentDisposition = &v
7330 return s
7331}
7332
7333// SetContentEncoding sets the ContentEncoding field's value.
7334func (s *CopyObjectInput) SetContentEncoding(v string) *CopyObjectInput {
7335 s.ContentEncoding = &v
7336 return s
7337}
7338
7339// SetContentLanguage sets the ContentLanguage field's value.
7340func (s *CopyObjectInput) SetContentLanguage(v string) *CopyObjectInput {
7341 s.ContentLanguage = &v
7342 return s
7343}
7344
7345// SetContentType sets the ContentType field's value.
7346func (s *CopyObjectInput) SetContentType(v string) *CopyObjectInput {
7347 s.ContentType = &v
7348 return s
7349}
7350
7351// SetCopySource sets the CopySource field's value.
7352func (s *CopyObjectInput) SetCopySource(v string) *CopyObjectInput {
7353 s.CopySource = &v
7354 return s
7355}
7356
7357// SetCopySourceIfMatch sets the CopySourceIfMatch field's value.
7358func (s *CopyObjectInput) SetCopySourceIfMatch(v string) *CopyObjectInput {
7359 s.CopySourceIfMatch = &v
7360 return s
7361}
7362
7363// SetCopySourceIfModifiedSince sets the CopySourceIfModifiedSince field's value.
7364func (s *CopyObjectInput) SetCopySourceIfModifiedSince(v time.Time) *CopyObjectInput {
7365 s.CopySourceIfModifiedSince = &v
7366 return s
7367}
7368
7369// SetCopySourceIfNoneMatch sets the CopySourceIfNoneMatch field's value.
7370func (s *CopyObjectInput) SetCopySourceIfNoneMatch(v string) *CopyObjectInput {
7371 s.CopySourceIfNoneMatch = &v
7372 return s
7373}
7374
7375// SetCopySourceIfUnmodifiedSince sets the CopySourceIfUnmodifiedSince field's value.
7376func (s *CopyObjectInput) SetCopySourceIfUnmodifiedSince(v time.Time) *CopyObjectInput {
7377 s.CopySourceIfUnmodifiedSince = &v
7378 return s
7379}
7380
7381// SetCopySourceSSECustomerAlgorithm sets the CopySourceSSECustomerAlgorithm field's value.
7382func (s *CopyObjectInput) SetCopySourceSSECustomerAlgorithm(v string) *CopyObjectInput {
7383 s.CopySourceSSECustomerAlgorithm = &v
7384 return s
7385}
7386
7387// SetCopySourceSSECustomerKey sets the CopySourceSSECustomerKey field's value.
7388func (s *CopyObjectInput) SetCopySourceSSECustomerKey(v string) *CopyObjectInput {
7389 s.CopySourceSSECustomerKey = &v
7390 return s
7391}
7392
7393// SetCopySourceSSECustomerKeyMD5 sets the CopySourceSSECustomerKeyMD5 field's value.
7394func (s *CopyObjectInput) SetCopySourceSSECustomerKeyMD5(v string) *CopyObjectInput {
7395 s.CopySourceSSECustomerKeyMD5 = &v
7396 return s
7397}
7398
7399// SetExpires sets the Expires field's value.
7400func (s *CopyObjectInput) SetExpires(v time.Time) *CopyObjectInput {
7401 s.Expires = &v
7402 return s
7403}
7404
7405// SetGrantFullControl sets the GrantFullControl field's value.
7406func (s *CopyObjectInput) SetGrantFullControl(v string) *CopyObjectInput {
7407 s.GrantFullControl = &v
7408 return s
7409}
7410
7411// SetGrantRead sets the GrantRead field's value.
7412func (s *CopyObjectInput) SetGrantRead(v string) *CopyObjectInput {
7413 s.GrantRead = &v
7414 return s
7415}
7416
7417// SetGrantReadACP sets the GrantReadACP field's value.
7418func (s *CopyObjectInput) SetGrantReadACP(v string) *CopyObjectInput {
7419 s.GrantReadACP = &v
7420 return s
7421}
7422
7423// SetGrantWriteACP sets the GrantWriteACP field's value.
7424func (s *CopyObjectInput) SetGrantWriteACP(v string) *CopyObjectInput {
7425 s.GrantWriteACP = &v
7426 return s
7427}
7428
7429// SetKey sets the Key field's value.
7430func (s *CopyObjectInput) SetKey(v string) *CopyObjectInput {
7431 s.Key = &v
7432 return s
7433}
7434
7435// SetMetadata sets the Metadata field's value.
7436func (s *CopyObjectInput) SetMetadata(v map[string]*string) *CopyObjectInput {
7437 s.Metadata = v
7438 return s
7439}
7440
7441// SetMetadataDirective sets the MetadataDirective field's value.
7442func (s *CopyObjectInput) SetMetadataDirective(v string) *CopyObjectInput {
7443 s.MetadataDirective = &v
7444 return s
7445}
7446
7447// SetRequestPayer sets the RequestPayer field's value.
7448func (s *CopyObjectInput) SetRequestPayer(v string) *CopyObjectInput {
7449 s.RequestPayer = &v
7450 return s
7451}
7452
7453// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value.
7454func (s *CopyObjectInput) SetSSECustomerAlgorithm(v string) *CopyObjectInput {
7455 s.SSECustomerAlgorithm = &v
7456 return s
7457}
7458
7459// SetSSECustomerKey sets the SSECustomerKey field's value.
7460func (s *CopyObjectInput) SetSSECustomerKey(v string) *CopyObjectInput {
7461 s.SSECustomerKey = &v
7462 return s
7463}
7464
7465// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
7466func (s *CopyObjectInput) SetSSECustomerKeyMD5(v string) *CopyObjectInput {
7467 s.SSECustomerKeyMD5 = &v
7468 return s
7469}
7470
7471// SetSSEKMSKeyId sets the SSEKMSKeyId field's value.
7472func (s *CopyObjectInput) SetSSEKMSKeyId(v string) *CopyObjectInput {
7473 s.SSEKMSKeyId = &v
7474 return s
7475}
7476
7477// SetServerSideEncryption sets the ServerSideEncryption field's value.
7478func (s *CopyObjectInput) SetServerSideEncryption(v string) *CopyObjectInput {
7479 s.ServerSideEncryption = &v
7480 return s
7481}
7482
7483// SetStorageClass sets the StorageClass field's value.
7484func (s *CopyObjectInput) SetStorageClass(v string) *CopyObjectInput {
7485 s.StorageClass = &v
7486 return s
7487}
7488
7489// SetTagging sets the Tagging field's value.
7490func (s *CopyObjectInput) SetTagging(v string) *CopyObjectInput {
7491 s.Tagging = &v
7492 return s
7493}
7494
7495// SetTaggingDirective sets the TaggingDirective field's value.
7496func (s *CopyObjectInput) SetTaggingDirective(v string) *CopyObjectInput {
7497 s.TaggingDirective = &v
7498 return s
7499}
7500
7501// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value.
7502func (s *CopyObjectInput) SetWebsiteRedirectLocation(v string) *CopyObjectInput {
7503 s.WebsiteRedirectLocation = &v
7504 return s
7505}
7506
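// Illustrative sketch (not part of the generated code): a minimal copy
// request. The bucket and key names are assumptions. Note that CopySource is
// the URL-encoded "sourcebucket/sourcekey" pair, not just an object key.
func copyObjectInputDemo() (*CopyObjectInput, error) {
	in := (&CopyObjectInput{}).
		SetBucket("dest-bucket").
		SetKey("backup/report.csv").
		SetCopySource("source-bucket/reports/report.csv"). // URL-encode keys with special characters
		SetMetadataDirective("COPY")                        // keep the source object's metadata
	if err := in.Validate(); err != nil {
		return nil, err
	}
	return in, nil
}
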
7507// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObjectOutput
7508type CopyObjectOutput struct {
7509 _ struct{} `type:"structure" payload:"CopyObjectResult"`
7510
7511 CopyObjectResult *CopyObjectResult `type:"structure"`
7512
7513 CopySourceVersionId *string `location:"header" locationName:"x-amz-copy-source-version-id" type:"string"`
7514
7515 // If the object expiration is configured, the response includes this header.
7516 Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"`
7517
7518 // If present, indicates that the requester was successfully charged for the
7519 // request.
7520 RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
7521
7522 // If server-side encryption with a customer-provided encryption key was requested,
7523 // the response will include this header confirming the encryption algorithm
7524 // used.
7525 SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
7526
7527 // If server-side encryption with a customer-provided encryption key was requested,
7528 // the response will include this header to provide round trip message integrity
7529 // verification of the customer-provided encryption key.
7530 SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
7531
7532 // If present, specifies the ID of the AWS Key Management Service (KMS) master
7533 // encryption key that was used for the object.
7534 SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
7535
7536 // The Server-side encryption algorithm used when storing this object in S3
7537 // (e.g., AES256, aws:kms).
7538 ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
7539
7540 // Version ID of the newly created copy.
7541 VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"`
7542}
7543
7544// String returns the string representation
7545func (s CopyObjectOutput) String() string {
7546 return awsutil.Prettify(s)
7547}
7548
7549// GoString returns the string representation
7550func (s CopyObjectOutput) GoString() string {
7551 return s.String()
7552}
7553
7554// SetCopyObjectResult sets the CopyObjectResult field's value.
7555func (s *CopyObjectOutput) SetCopyObjectResult(v *CopyObjectResult) *CopyObjectOutput {
7556 s.CopyObjectResult = v
7557 return s
7558}
7559
7560// SetCopySourceVersionId sets the CopySourceVersionId field's value.
7561func (s *CopyObjectOutput) SetCopySourceVersionId(v string) *CopyObjectOutput {
7562 s.CopySourceVersionId = &v
7563 return s
7564}
7565
7566// SetExpiration sets the Expiration field's value.
7567func (s *CopyObjectOutput) SetExpiration(v string) *CopyObjectOutput {
7568 s.Expiration = &v
7569 return s
7570}
7571
7572// SetRequestCharged sets the RequestCharged field's value.
7573func (s *CopyObjectOutput) SetRequestCharged(v string) *CopyObjectOutput {
7574 s.RequestCharged = &v
7575 return s
7576}
7577
7578// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value.
7579func (s *CopyObjectOutput) SetSSECustomerAlgorithm(v string) *CopyObjectOutput {
7580 s.SSECustomerAlgorithm = &v
7581 return s
7582}
7583
7584// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
7585func (s *CopyObjectOutput) SetSSECustomerKeyMD5(v string) *CopyObjectOutput {
7586 s.SSECustomerKeyMD5 = &v
7587 return s
7588}
7589
7590// SetSSEKMSKeyId sets the SSEKMSKeyId field's value.
7591func (s *CopyObjectOutput) SetSSEKMSKeyId(v string) *CopyObjectOutput {
7592 s.SSEKMSKeyId = &v
7593 return s
7594}
7595
7596// SetServerSideEncryption sets the ServerSideEncryption field's value.
7597func (s *CopyObjectOutput) SetServerSideEncryption(v string) *CopyObjectOutput {
7598 s.ServerSideEncryption = &v
7599 return s
7600}
7601
7602// SetVersionId sets the VersionId field's value.
7603func (s *CopyObjectOutput) SetVersionId(v string) *CopyObjectOutput {
7604 s.VersionId = &v
7605 return s
7606}
7607
7608// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObjectResult
7609type CopyObjectResult struct {
7610 _ struct{} `type:"structure"`
7611
7612 ETag *string `type:"string"`
7613
7614 LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"`
7615}
7616
7617// String returns the string representation
7618func (s CopyObjectResult) String() string {
7619 return awsutil.Prettify(s)
7620}
7621
7622// GoString returns the string representation
7623func (s CopyObjectResult) GoString() string {
7624 return s.String()
7625}
7626
7627// SetETag sets the ETag field's value.
7628func (s *CopyObjectResult) SetETag(v string) *CopyObjectResult {
7629 s.ETag = &v
7630 return s
7631}
7632
7633// SetLastModified sets the LastModified field's value.
7634func (s *CopyObjectResult) SetLastModified(v time.Time) *CopyObjectResult {
7635 s.LastModified = &v
7636 return s
7637}
7638
7639// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyPartResult
7640type CopyPartResult struct {
7641 _ struct{} `type:"structure"`
7642
7643 // Entity tag of the object.
7644 ETag *string `type:"string"`
7645
7646 // Date and time at which the object was uploaded.
7647 LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"`
7648}
7649
7650// String returns the string representation
7651func (s CopyPartResult) String() string {
7652 return awsutil.Prettify(s)
7653}
7654
7655// GoString returns the string representation
7656func (s CopyPartResult) GoString() string {
7657 return s.String()
7658}
7659
7660// SetETag sets the ETag field's value.
7661func (s *CopyPartResult) SetETag(v string) *CopyPartResult {
7662 s.ETag = &v
7663 return s
7664}
7665
7666// SetLastModified sets the LastModified field's value.
7667func (s *CopyPartResult) SetLastModified(v time.Time) *CopyPartResult {
7668 s.LastModified = &v
7669 return s
7670}
7671
7672// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucketConfiguration
7673type CreateBucketConfiguration struct {
7674 _ struct{} `type:"structure"`
7675
7676 // Specifies the region where the bucket will be created. If you don't specify
7677 // a region, the bucket is created in US Standard (us-east-1).
7678 LocationConstraint *string `type:"string" enum:"BucketLocationConstraint"`
7679}
7680
7681// String returns the string representation
7682func (s CreateBucketConfiguration) String() string {
7683 return awsutil.Prettify(s)
7684}
7685
7686// GoString returns the string representation
7687func (s CreateBucketConfiguration) GoString() string {
7688 return s.String()
7689}
7690
7691// SetLocationConstraint sets the LocationConstraint field's value.
7692func (s *CreateBucketConfiguration) SetLocationConstraint(v string) *CreateBucketConfiguration {
7693 s.LocationConstraint = &v
7694 return s
7695}
7696
7697// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucketRequest
7698type CreateBucketInput struct {
7699 _ struct{} `type:"structure" payload:"CreateBucketConfiguration"`
7700
7701 // The canned ACL to apply to the bucket.
7702 ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"BucketCannedACL"`
7703
7704 // Bucket is a required field
7705 Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
7706
7707 CreateBucketConfiguration *CreateBucketConfiguration `locationName:"CreateBucketConfiguration" type:"structure"`
7708
7709 // Allows grantee the read, write, read ACP, and write ACP permissions on the
7710 // bucket.
7711 GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`
7712
7713 // Allows grantee to list the objects in the bucket.
7714 GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"`
7715
7716 // Allows grantee to read the bucket ACL.
7717 GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"`
7718
7719 // Allows grantee to create, overwrite, and delete any object in the bucket.
7720 GrantWrite *string `location:"header" locationName:"x-amz-grant-write" type:"string"`
7721
7722 // Allows grantee to write the ACL for the applicable bucket.
7723 GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
7724}
7725
7726// String returns the string representation
7727func (s CreateBucketInput) String() string {
7728 return awsutil.Prettify(s)
7729}
7730
7731// GoString returns the string representation
7732func (s CreateBucketInput) GoString() string {
7733 return s.String()
7734}
7735
7736// Validate inspects the fields of the type to determine if they are valid.
7737func (s *CreateBucketInput) Validate() error {
7738 invalidParams := request.ErrInvalidParams{Context: "CreateBucketInput"}
7739 if s.Bucket == nil {
7740 invalidParams.Add(request.NewErrParamRequired("Bucket"))
7741 }
7742
7743 if invalidParams.Len() > 0 {
7744 return invalidParams
7745 }
7746 return nil
7747}
7748
7749// SetACL sets the ACL field's value.
7750func (s *CreateBucketInput) SetACL(v string) *CreateBucketInput {
7751 s.ACL = &v
7752 return s
7753}
7754
7755// SetBucket sets the Bucket field's value.
7756func (s *CreateBucketInput) SetBucket(v string) *CreateBucketInput {
7757 s.Bucket = &v
7758 return s
7759}
7760
7761// SetCreateBucketConfiguration sets the CreateBucketConfiguration field's value.
7762func (s *CreateBucketInput) SetCreateBucketConfiguration(v *CreateBucketConfiguration) *CreateBucketInput {
7763 s.CreateBucketConfiguration = v
7764 return s
7765}
7766
7767// SetGrantFullControl sets the GrantFullControl field's value.
7768func (s *CreateBucketInput) SetGrantFullControl(v string) *CreateBucketInput {
7769 s.GrantFullControl = &v
7770 return s
7771}
7772
7773// SetGrantRead sets the GrantRead field's value.
7774func (s *CreateBucketInput) SetGrantRead(v string) *CreateBucketInput {
7775 s.GrantRead = &v
7776 return s
7777}
7778
7779// SetGrantReadACP sets the GrantReadACP field's value.
7780func (s *CreateBucketInput) SetGrantReadACP(v string) *CreateBucketInput {
7781 s.GrantReadACP = &v
7782 return s
7783}
7784
7785// SetGrantWrite sets the GrantWrite field's value.
7786func (s *CreateBucketInput) SetGrantWrite(v string) *CreateBucketInput {
7787 s.GrantWrite = &v
7788 return s
7789}
7790
7791// SetGrantWriteACP sets the GrantWriteACP field's value.
7792func (s *CreateBucketInput) SetGrantWriteACP(v string) *CreateBucketInput {
7793 s.GrantWriteACP = &v
7794 return s
7795}
7796
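// Illustrative sketch (not part of the generated code): creating a bucket
// outside us-east-1, where a LocationConstraint must be supplied. The bucket
// name and region are assumptions for this example.
func createBucketInputDemo() (*CreateBucketInput, error) {
	in := (&CreateBucketInput{}).
		SetBucket("example-bucket-eu").
		SetCreateBucketConfiguration(
			(&CreateBucketConfiguration{}).SetLocationConstraint("eu-west-1"))
	if err := in.Validate(); err != nil { // only Bucket is required
		return nil, err
	}
	return in, nil
}
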
7797// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucketOutput
7798type CreateBucketOutput struct {
7799 _ struct{} `type:"structure"`
7800
7801 Location *string `location:"header" locationName:"Location" type:"string"`
7802}
7803
7804// String returns the string representation
7805func (s CreateBucketOutput) String() string {
7806 return awsutil.Prettify(s)
7807}
7808
7809// GoString returns the string representation
7810func (s CreateBucketOutput) GoString() string {
7811 return s.String()
7812}
7813
7814// SetLocation sets the Location field's value.
7815func (s *CreateBucketOutput) SetLocation(v string) *CreateBucketOutput {
7816 s.Location = &v
7817 return s
7818}
7819
7820// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateMultipartUploadRequest
7821type CreateMultipartUploadInput struct {
7822 _ struct{} `type:"structure"`
7823
7824 // The canned ACL to apply to the object.
7825 ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"`
7826
7827 // Bucket is a required field
7828 Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
7829
7830 // Specifies caching behavior along the request/reply chain.
7831 CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"`
7832
7833 // Specifies presentational information for the object.
7834 ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"`
7835
7836 // Specifies what content encodings have been applied to the object and thus
7837 // what decoding mechanisms must be applied to obtain the media-type referenced
7838 // by the Content-Type header field.
7839 ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"`
7840
7841 // The language the content is in.
7842 ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"`
7843
7844 // A standard MIME type describing the format of the object data.
7845 ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
7846
7847 // The date and time at which the object is no longer cacheable.
7848 Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp" timestampFormat:"rfc822"`
7849
7850 // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.
7851 GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`
7852
7853 // Allows grantee to read the object data and its metadata.
7854 GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"`
7855
7856 // Allows grantee to read the object ACL.
7857 GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"`
7858
7859 // Allows grantee to write the ACL for the applicable object.
7860 GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
7861
7862 // Key is a required field
7863 Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
7864
7865 // A map of metadata to store with the object in S3.
7866 Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
7867
7868 // Confirms that the requester knows that they will be charged for the
7869 // request. Bucket owners need not specify this parameter in their requests.
7870 // Documentation on downloading objects from requester pays buckets can be found
7871 // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
7872 RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
7873
7874 // Specifies the algorithm to use when encrypting the object (e.g., AES256).
7875 SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
7876
7877 // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
7878 // data. This value is used to store the object and then it is discarded; Amazon
7879 // does not store the encryption key. The key must be appropriate for use with
7880 // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
7881 // header.
7882 SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"`
7883
7884 // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
7885 // Amazon S3 uses this header for a message integrity check to ensure the encryption
7886 // key was transmitted without error.
7887 SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
7888
7889 // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT
7890 // requests for an object protected by AWS KMS will fail if not made via SSL
7891 // or using SigV4. Documentation on configuring any of the officially supported
7892 // AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version
7893 SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
7894
7895 // The Server-side encryption algorithm used when storing this object in S3
7896 // (e.g., AES256, aws:kms).
7897 ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
7898
7899 // The type of storage to use for the object. Defaults to 'STANDARD'.
7900 StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"`
7901
7902 // If the bucket is configured as a website, redirects requests for this object
7903 // to another object in the same bucket or to an external URL. Amazon S3 stores
7904 // the value of this header in the object metadata.
7905 WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"`
7906}
7907
7908// String returns the string representation
7909func (s CreateMultipartUploadInput) String() string {
7910 return awsutil.Prettify(s)
7911}
7912
7913// GoString returns the string representation
7914func (s CreateMultipartUploadInput) GoString() string {
7915 return s.String()
7916}
7917
7918// Validate inspects the fields of the type to determine if they are valid.
7919func (s *CreateMultipartUploadInput) Validate() error {
7920 invalidParams := request.ErrInvalidParams{Context: "CreateMultipartUploadInput"}
7921 if s.Bucket == nil {
7922 invalidParams.Add(request.NewErrParamRequired("Bucket"))
7923 }
7924 if s.Key == nil {
7925 invalidParams.Add(request.NewErrParamRequired("Key"))
7926 }
7927 if s.Key != nil && len(*s.Key) < 1 {
7928 invalidParams.Add(request.NewErrParamMinLen("Key", 1))
7929 }
7930
7931 if invalidParams.Len() > 0 {
7932 return invalidParams
7933 }
7934 return nil
7935}
7936
7937// SetACL sets the ACL field's value.
7938func (s *CreateMultipartUploadInput) SetACL(v string) *CreateMultipartUploadInput {
7939 s.ACL = &v
7940 return s
7941}
7942
7943// SetBucket sets the Bucket field's value.
7944func (s *CreateMultipartUploadInput) SetBucket(v string) *CreateMultipartUploadInput {
7945 s.Bucket = &v
7946 return s
7947}
7948
7949// SetCacheControl sets the CacheControl field's value.
7950func (s *CreateMultipartUploadInput) SetCacheControl(v string) *CreateMultipartUploadInput {
7951 s.CacheControl = &v
7952 return s
7953}
7954
7955// SetContentDisposition sets the ContentDisposition field's value.
7956func (s *CreateMultipartUploadInput) SetContentDisposition(v string) *CreateMultipartUploadInput {
7957 s.ContentDisposition = &v
7958 return s
7959}
7960
7961// SetContentEncoding sets the ContentEncoding field's value.
7962func (s *CreateMultipartUploadInput) SetContentEncoding(v string) *CreateMultipartUploadInput {
7963 s.ContentEncoding = &v
7964 return s
7965}
7966
7967// SetContentLanguage sets the ContentLanguage field's value.
7968func (s *CreateMultipartUploadInput) SetContentLanguage(v string) *CreateMultipartUploadInput {
7969 s.ContentLanguage = &v
7970 return s
7971}
7972
7973// SetContentType sets the ContentType field's value.
7974func (s *CreateMultipartUploadInput) SetContentType(v string) *CreateMultipartUploadInput {
7975 s.ContentType = &v
7976 return s
7977}
7978
7979// SetExpires sets the Expires field's value.
7980func (s *CreateMultipartUploadInput) SetExpires(v time.Time) *CreateMultipartUploadInput {
7981 s.Expires = &v
7982 return s
7983}
7984
7985// SetGrantFullControl sets the GrantFullControl field's value.
7986func (s *CreateMultipartUploadInput) SetGrantFullControl(v string) *CreateMultipartUploadInput {
7987 s.GrantFullControl = &v
7988 return s
7989}
7990
7991// SetGrantRead sets the GrantRead field's value.
7992func (s *CreateMultipartUploadInput) SetGrantRead(v string) *CreateMultipartUploadInput {
7993 s.GrantRead = &v
7994 return s
7995}
7996
7997// SetGrantReadACP sets the GrantReadACP field's value.
7998func (s *CreateMultipartUploadInput) SetGrantReadACP(v string) *CreateMultipartUploadInput {
7999 s.GrantReadACP = &v
8000 return s
8001}
8002
8003// SetGrantWriteACP sets the GrantWriteACP field's value.
8004func (s *CreateMultipartUploadInput) SetGrantWriteACP(v string) *CreateMultipartUploadInput {
8005 s.GrantWriteACP = &v
8006 return s
8007}
8008
8009// SetKey sets the Key field's value.
8010func (s *CreateMultipartUploadInput) SetKey(v string) *CreateMultipartUploadInput {
8011 s.Key = &v
8012 return s
8013}
8014
8015// SetMetadata sets the Metadata field's value.
8016func (s *CreateMultipartUploadInput) SetMetadata(v map[string]*string) *CreateMultipartUploadInput {
8017 s.Metadata = v
8018 return s
8019}
8020
8021// SetRequestPayer sets the RequestPayer field's value.
8022func (s *CreateMultipartUploadInput) SetRequestPayer(v string) *CreateMultipartUploadInput {
8023 s.RequestPayer = &v
8024 return s
8025}
8026
8027// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value.
8028func (s *CreateMultipartUploadInput) SetSSECustomerAlgorithm(v string) *CreateMultipartUploadInput {
8029 s.SSECustomerAlgorithm = &v
8030 return s
8031}
8032
8033// SetSSECustomerKey sets the SSECustomerKey field's value.
8034func (s *CreateMultipartUploadInput) SetSSECustomerKey(v string) *CreateMultipartUploadInput {
8035 s.SSECustomerKey = &v
8036 return s
8037}
8038
8039// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
8040func (s *CreateMultipartUploadInput) SetSSECustomerKeyMD5(v string) *CreateMultipartUploadInput {
8041 s.SSECustomerKeyMD5 = &v
8042 return s
8043}
8044
8045// SetSSEKMSKeyId sets the SSEKMSKeyId field's value.
8046func (s *CreateMultipartUploadInput) SetSSEKMSKeyId(v string) *CreateMultipartUploadInput {
8047 s.SSEKMSKeyId = &v
8048 return s
8049}
8050
8051// SetServerSideEncryption sets the ServerSideEncryption field's value.
8052func (s *CreateMultipartUploadInput) SetServerSideEncryption(v string) *CreateMultipartUploadInput {
8053 s.ServerSideEncryption = &v
8054 return s
8055}
8056
8057// SetStorageClass sets the StorageClass field's value.
8058func (s *CreateMultipartUploadInput) SetStorageClass(v string) *CreateMultipartUploadInput {
8059 s.StorageClass = &v
8060 return s
8061}
8062
8063// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value.
8064func (s *CreateMultipartUploadInput) SetWebsiteRedirectLocation(v string) *CreateMultipartUploadInput {
8065 s.WebsiteRedirectLocation = &v
8066 return s
8067}
8068
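// Illustrative sketch, not generated code: the setters above all return the
// receiver, so a CreateMultipartUploadInput can be built fluently. The key,
// content type, and KMS key ID below are hypothetical.
//
//    input := (&CreateMultipartUploadInput{}).
//        SetKey("example-key").
//        SetContentType("application/octet-stream").
//        SetServerSideEncryption("aws:kms").
//        SetSSEKMSKeyId("example-kms-key-id")
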
8069// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateMultipartUploadOutput
8070type CreateMultipartUploadOutput struct {
8071 _ struct{} `type:"structure"`
8072
8073	// Date when the multipart upload will become eligible for an abort operation by lifecycle.
8074 AbortDate *time.Time `location:"header" locationName:"x-amz-abort-date" type:"timestamp" timestampFormat:"rfc822"`
8075
8076	// ID of the lifecycle rule that makes the multipart upload eligible for the
8077	// abort operation.
8078 AbortRuleId *string `location:"header" locationName:"x-amz-abort-rule-id" type:"string"`
8079
8080 // Name of the bucket to which the multipart upload was initiated.
8081 Bucket *string `locationName:"Bucket" type:"string"`
8082
8083 // Object key for which the multipart upload was initiated.
8084 Key *string `min:"1" type:"string"`
8085
8086 // If present, indicates that the requester was successfully charged for the
8087 // request.
8088 RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
8089
8090 // If server-side encryption with a customer-provided encryption key was requested,
8091 // the response will include this header confirming the encryption algorithm
8092 // used.
8093 SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
8094
8095 // If server-side encryption with a customer-provided encryption key was requested,
8096 // the response will include this header to provide round trip message integrity
8097 // verification of the customer-provided encryption key.
8098 SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
8099
8100 // If present, specifies the ID of the AWS Key Management Service (KMS) master
8101 // encryption key that was used for the object.
8102 SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
8103
8104	// The server-side encryption algorithm used when storing this object in S3
8105	// (e.g., AES256, aws:kms).
8106 ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
8107
8108 // ID for the initiated multipart upload.
8109 UploadId *string `type:"string"`
8110}
8111
8112// String returns the string representation
8113func (s CreateMultipartUploadOutput) String() string {
8114 return awsutil.Prettify(s)
8115}
8116
8117// GoString returns the string representation
8118func (s CreateMultipartUploadOutput) GoString() string {
8119 return s.String()
8120}
8121
8122// SetAbortDate sets the AbortDate field's value.
8123func (s *CreateMultipartUploadOutput) SetAbortDate(v time.Time) *CreateMultipartUploadOutput {
8124 s.AbortDate = &v
8125 return s
8126}
8127
8128// SetAbortRuleId sets the AbortRuleId field's value.
8129func (s *CreateMultipartUploadOutput) SetAbortRuleId(v string) *CreateMultipartUploadOutput {
8130 s.AbortRuleId = &v
8131 return s
8132}
8133
8134// SetBucket sets the Bucket field's value.
8135func (s *CreateMultipartUploadOutput) SetBucket(v string) *CreateMultipartUploadOutput {
8136 s.Bucket = &v
8137 return s
8138}
8139
8140// SetKey sets the Key field's value.
8141func (s *CreateMultipartUploadOutput) SetKey(v string) *CreateMultipartUploadOutput {
8142 s.Key = &v
8143 return s
8144}
8145
8146// SetRequestCharged sets the RequestCharged field's value.
8147func (s *CreateMultipartUploadOutput) SetRequestCharged(v string) *CreateMultipartUploadOutput {
8148 s.RequestCharged = &v
8149 return s
8150}
8151
8152// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value.
8153func (s *CreateMultipartUploadOutput) SetSSECustomerAlgorithm(v string) *CreateMultipartUploadOutput {
8154 s.SSECustomerAlgorithm = &v
8155 return s
8156}
8157
8158// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
8159func (s *CreateMultipartUploadOutput) SetSSECustomerKeyMD5(v string) *CreateMultipartUploadOutput {
8160 s.SSECustomerKeyMD5 = &v
8161 return s
8162}
8163
8164// SetSSEKMSKeyId sets the SSEKMSKeyId field's value.
8165func (s *CreateMultipartUploadOutput) SetSSEKMSKeyId(v string) *CreateMultipartUploadOutput {
8166 s.SSEKMSKeyId = &v
8167 return s
8168}
8169
8170// SetServerSideEncryption sets the ServerSideEncryption field's value.
8171func (s *CreateMultipartUploadOutput) SetServerSideEncryption(v string) *CreateMultipartUploadOutput {
8172 s.ServerSideEncryption = &v
8173 return s
8174}
8175
8176// SetUploadId sets the UploadId field's value.
8177func (s *CreateMultipartUploadOutput) SetUploadId(v string) *CreateMultipartUploadOutput {
8178 s.UploadId = &v
8179 return s
8180}
8181
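// Sketch: after CreateMultipartUpload succeeds, UploadId identifies the upload
// in subsequent UploadPart and CompleteMultipartUpload calls. `out` is a
// hypothetical *CreateMultipartUploadOutput; fields are pointers and may be nil.
//
//    if out.UploadId != nil {
//        uploadID := *out.UploadId
//        _ = uploadID // carry through the part uploads and the completion call
//    }
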
8182// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Delete
8183type Delete struct {
8184 _ struct{} `type:"structure"`
8185
8186 // Objects is a required field
8187 Objects []*ObjectIdentifier `locationName:"Object" type:"list" flattened:"true" required:"true"`
8188
8189 // Element to enable quiet mode for the request. When you add this element,
8190 // you must set its value to true.
8191 Quiet *bool `type:"boolean"`
8192}
8193
8194// String returns the string representation
8195func (s Delete) String() string {
8196 return awsutil.Prettify(s)
8197}
8198
8199// GoString returns the string representation
8200func (s Delete) GoString() string {
8201 return s.String()
8202}
8203
8204// Validate inspects the fields of the type to determine if they are valid.
8205func (s *Delete) Validate() error {
8206 invalidParams := request.ErrInvalidParams{Context: "Delete"}
8207 if s.Objects == nil {
8208 invalidParams.Add(request.NewErrParamRequired("Objects"))
8209 }
8210 if s.Objects != nil {
8211 for i, v := range s.Objects {
8212 if v == nil {
8213 continue
8214 }
8215 if err := v.Validate(); err != nil {
8216 invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Objects", i), err.(request.ErrInvalidParams))
8217 }
8218 }
8219 }
8220
8221 if invalidParams.Len() > 0 {
8222 return invalidParams
8223 }
8224 return nil
8225}
8226
8227// SetObjects sets the Objects field's value.
8228func (s *Delete) SetObjects(v []*ObjectIdentifier) *Delete {
8229 s.Objects = v
8230 return s
8231}
8232
8233// SetQuiet sets the Quiet field's value.
8234func (s *Delete) SetQuiet(v bool) *Delete {
8235 s.Quiet = &v
8236 return s
8237}
8238
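// Sketch, with hypothetical keys: build a Delete payload and validate it
// client-side before sending. Validate reports a nil Objects field and any
// invalid nested ObjectIdentifier (for example, a missing Key).
//
//    del := (&Delete{}).SetObjects([]*ObjectIdentifier{
//        (&ObjectIdentifier{}).SetKey("reports/2017/a.csv"),
//        (&ObjectIdentifier{}).SetKey("reports/2017/b.csv"),
//    }).SetQuiet(true)
//    if err := del.Validate(); err != nil {
//        // err is a request.ErrInvalidParams describing each bad field
//    }
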
8239// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketAnalyticsConfigurationRequest
8240type DeleteBucketAnalyticsConfigurationInput struct {
8241 _ struct{} `type:"structure"`
8242
8243 // The name of the bucket from which an analytics configuration is deleted.
8244 //
8245 // Bucket is a required field
8246 Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
8247
8248 // The identifier used to represent an analytics configuration.
8249 //
8250 // Id is a required field
8251 Id *string `location:"querystring" locationName:"id" type:"string" required:"true"`
8252}
8253
8254// String returns the string representation
8255func (s DeleteBucketAnalyticsConfigurationInput) String() string {
8256 return awsutil.Prettify(s)
8257}
8258
8259// GoString returns the string representation
8260func (s DeleteBucketAnalyticsConfigurationInput) GoString() string {
8261 return s.String()
8262}
8263
8264// Validate inspects the fields of the type to determine if they are valid.
8265func (s *DeleteBucketAnalyticsConfigurationInput) Validate() error {
8266 invalidParams := request.ErrInvalidParams{Context: "DeleteBucketAnalyticsConfigurationInput"}
8267 if s.Bucket == nil {
8268 invalidParams.Add(request.NewErrParamRequired("Bucket"))
8269 }
8270 if s.Id == nil {
8271 invalidParams.Add(request.NewErrParamRequired("Id"))
8272 }
8273
8274 if invalidParams.Len() > 0 {
8275 return invalidParams
8276 }
8277 return nil
8278}
8279
8280// SetBucket sets the Bucket field's value.
8281func (s *DeleteBucketAnalyticsConfigurationInput) SetBucket(v string) *DeleteBucketAnalyticsConfigurationInput {
8282 s.Bucket = &v
8283 return s
8284}
8285
8286// SetId sets the Id field's value.
8287func (s *DeleteBucketAnalyticsConfigurationInput) SetId(v string) *DeleteBucketAnalyticsConfigurationInput {
8288 s.Id = &v
8289 return s
8290}
8291
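// Sketch: the bucket-level configuration inputs in this file (analytics,
// inventory, metrics) share this shape; Bucket and Id are both required and
// both are checked by Validate. The names below are hypothetical.
//
//    in := (&DeleteBucketAnalyticsConfigurationInput{}).
//        SetBucket("example-bucket").
//        SetId("example-config-id")
//    err := in.Validate() // nil only when Bucket and Id are both set
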
8292// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketAnalyticsConfigurationOutput
8293type DeleteBucketAnalyticsConfigurationOutput struct {
8294 _ struct{} `type:"structure"`
8295}
8296
8297// String returns the string representation
8298func (s DeleteBucketAnalyticsConfigurationOutput) String() string {
8299 return awsutil.Prettify(s)
8300}
8301
8302// GoString returns the string representation
8303func (s DeleteBucketAnalyticsConfigurationOutput) GoString() string {
8304 return s.String()
8305}
8306
8307// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketCorsRequest
8308type DeleteBucketCorsInput struct {
8309 _ struct{} `type:"structure"`
8310
8311 // Bucket is a required field
8312 Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
8313}
8314
8315// String returns the string representation
8316func (s DeleteBucketCorsInput) String() string {
8317 return awsutil.Prettify(s)
8318}
8319
8320// GoString returns the string representation
8321func (s DeleteBucketCorsInput) GoString() string {
8322 return s.String()
8323}
8324
8325// Validate inspects the fields of the type to determine if they are valid.
8326func (s *DeleteBucketCorsInput) Validate() error {
8327 invalidParams := request.ErrInvalidParams{Context: "DeleteBucketCorsInput"}
8328 if s.Bucket == nil {
8329 invalidParams.Add(request.NewErrParamRequired("Bucket"))
8330 }
8331
8332 if invalidParams.Len() > 0 {
8333 return invalidParams
8334 }
8335 return nil
8336}
8337
8338// SetBucket sets the Bucket field's value.
8339func (s *DeleteBucketCorsInput) SetBucket(v string) *DeleteBucketCorsInput {
8340 s.Bucket = &v
8341 return s
8342}
8343
8344// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketCorsOutput
8345type DeleteBucketCorsOutput struct {
8346 _ struct{} `type:"structure"`
8347}
8348
8349// String returns the string representation
8350func (s DeleteBucketCorsOutput) String() string {
8351 return awsutil.Prettify(s)
8352}
8353
8354// GoString returns the string representation
8355func (s DeleteBucketCorsOutput) GoString() string {
8356 return s.String()
8357}
8358
8359// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketRequest
8360type DeleteBucketInput struct {
8361 _ struct{} `type:"structure"`
8362
8363 // Bucket is a required field
8364 Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
8365}
8366
8367// String returns the string representation
8368func (s DeleteBucketInput) String() string {
8369 return awsutil.Prettify(s)
8370}
8371
8372// GoString returns the string representation
8373func (s DeleteBucketInput) GoString() string {
8374 return s.String()
8375}
8376
8377// Validate inspects the fields of the type to determine if they are valid.
8378func (s *DeleteBucketInput) Validate() error {
8379 invalidParams := request.ErrInvalidParams{Context: "DeleteBucketInput"}
8380 if s.Bucket == nil {
8381 invalidParams.Add(request.NewErrParamRequired("Bucket"))
8382 }
8383
8384 if invalidParams.Len() > 0 {
8385 return invalidParams
8386 }
8387 return nil
8388}
8389
8390// SetBucket sets the Bucket field's value.
8391func (s *DeleteBucketInput) SetBucket(v string) *DeleteBucketInput {
8392 s.Bucket = &v
8393 return s
8394}
8395
8396// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketInventoryConfigurationRequest
8397type DeleteBucketInventoryConfigurationInput struct {
8398 _ struct{} `type:"structure"`
8399
8400 // The name of the bucket containing the inventory configuration to delete.
8401 //
8402 // Bucket is a required field
8403 Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
8404
8405 // The ID used to identify the inventory configuration.
8406 //
8407 // Id is a required field
8408 Id *string `location:"querystring" locationName:"id" type:"string" required:"true"`
8409}
8410
8411// String returns the string representation
8412func (s DeleteBucketInventoryConfigurationInput) String() string {
8413 return awsutil.Prettify(s)
8414}
8415
8416// GoString returns the string representation
8417func (s DeleteBucketInventoryConfigurationInput) GoString() string {
8418 return s.String()
8419}
8420
8421// Validate inspects the fields of the type to determine if they are valid.
8422func (s *DeleteBucketInventoryConfigurationInput) Validate() error {
8423 invalidParams := request.ErrInvalidParams{Context: "DeleteBucketInventoryConfigurationInput"}
8424 if s.Bucket == nil {
8425 invalidParams.Add(request.NewErrParamRequired("Bucket"))
8426 }
8427 if s.Id == nil {
8428 invalidParams.Add(request.NewErrParamRequired("Id"))
8429 }
8430
8431 if invalidParams.Len() > 0 {
8432 return invalidParams
8433 }
8434 return nil
8435}
8436
8437// SetBucket sets the Bucket field's value.
8438func (s *DeleteBucketInventoryConfigurationInput) SetBucket(v string) *DeleteBucketInventoryConfigurationInput {
8439 s.Bucket = &v
8440 return s
8441}
8442
8443// SetId sets the Id field's value.
8444func (s *DeleteBucketInventoryConfigurationInput) SetId(v string) *DeleteBucketInventoryConfigurationInput {
8445 s.Id = &v
8446 return s
8447}
8448
8449// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketInventoryConfigurationOutput
8450type DeleteBucketInventoryConfigurationOutput struct {
8451 _ struct{} `type:"structure"`
8452}
8453
8454// String returns the string representation
8455func (s DeleteBucketInventoryConfigurationOutput) String() string {
8456 return awsutil.Prettify(s)
8457}
8458
8459// GoString returns the string representation
8460func (s DeleteBucketInventoryConfigurationOutput) GoString() string {
8461 return s.String()
8462}
8463
8464// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketLifecycleRequest
8465type DeleteBucketLifecycleInput struct {
8466 _ struct{} `type:"structure"`
8467
8468 // Bucket is a required field
8469 Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
8470}
8471
8472// String returns the string representation
8473func (s DeleteBucketLifecycleInput) String() string {
8474 return awsutil.Prettify(s)
8475}
8476
8477// GoString returns the string representation
8478func (s DeleteBucketLifecycleInput) GoString() string {
8479 return s.String()
8480}
8481
8482// Validate inspects the fields of the type to determine if they are valid.
8483func (s *DeleteBucketLifecycleInput) Validate() error {
8484 invalidParams := request.ErrInvalidParams{Context: "DeleteBucketLifecycleInput"}
8485 if s.Bucket == nil {
8486 invalidParams.Add(request.NewErrParamRequired("Bucket"))
8487 }
8488
8489 if invalidParams.Len() > 0 {
8490 return invalidParams
8491 }
8492 return nil
8493}
8494
8495// SetBucket sets the Bucket field's value.
8496func (s *DeleteBucketLifecycleInput) SetBucket(v string) *DeleteBucketLifecycleInput {
8497 s.Bucket = &v
8498 return s
8499}
8500
8501// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketLifecycleOutput
8502type DeleteBucketLifecycleOutput struct {
8503 _ struct{} `type:"structure"`
8504}
8505
8506// String returns the string representation
8507func (s DeleteBucketLifecycleOutput) String() string {
8508 return awsutil.Prettify(s)
8509}
8510
8511// GoString returns the string representation
8512func (s DeleteBucketLifecycleOutput) GoString() string {
8513 return s.String()
8514}
8515
8516// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketMetricsConfigurationRequest
8517type DeleteBucketMetricsConfigurationInput struct {
8518 _ struct{} `type:"structure"`
8519
8520 // The name of the bucket containing the metrics configuration to delete.
8521 //
8522 // Bucket is a required field
8523 Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
8524
8525 // The ID used to identify the metrics configuration.
8526 //
8527 // Id is a required field
8528 Id *string `location:"querystring" locationName:"id" type:"string" required:"true"`
8529}
8530
8531// String returns the string representation
8532func (s DeleteBucketMetricsConfigurationInput) String() string {
8533 return awsutil.Prettify(s)
8534}
8535
8536// GoString returns the string representation
8537func (s DeleteBucketMetricsConfigurationInput) GoString() string {
8538 return s.String()
8539}
8540
8541// Validate inspects the fields of the type to determine if they are valid.
8542func (s *DeleteBucketMetricsConfigurationInput) Validate() error {
8543 invalidParams := request.ErrInvalidParams{Context: "DeleteBucketMetricsConfigurationInput"}
8544 if s.Bucket == nil {
8545 invalidParams.Add(request.NewErrParamRequired("Bucket"))
8546 }
8547 if s.Id == nil {
8548 invalidParams.Add(request.NewErrParamRequired("Id"))
8549 }
8550
8551 if invalidParams.Len() > 0 {
8552 return invalidParams
8553 }
8554 return nil
8555}
8556
8557// SetBucket sets the Bucket field's value.
8558func (s *DeleteBucketMetricsConfigurationInput) SetBucket(v string) *DeleteBucketMetricsConfigurationInput {
8559 s.Bucket = &v
8560 return s
8561}
8562
8563// SetId sets the Id field's value.
8564func (s *DeleteBucketMetricsConfigurationInput) SetId(v string) *DeleteBucketMetricsConfigurationInput {
8565 s.Id = &v
8566 return s
8567}
8568
8569// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketMetricsConfigurationOutput
8570type DeleteBucketMetricsConfigurationOutput struct {
8571 _ struct{} `type:"structure"`
8572}
8573
8574// String returns the string representation
8575func (s DeleteBucketMetricsConfigurationOutput) String() string {
8576 return awsutil.Prettify(s)
8577}
8578
8579// GoString returns the string representation
8580func (s DeleteBucketMetricsConfigurationOutput) GoString() string {
8581 return s.String()
8582}
8583
8584// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketOutput
8585type DeleteBucketOutput struct {
8586 _ struct{} `type:"structure"`
8587}
8588
8589// String returns the string representation
8590func (s DeleteBucketOutput) String() string {
8591 return awsutil.Prettify(s)
8592}
8593
8594// GoString returns the string representation
8595func (s DeleteBucketOutput) GoString() string {
8596 return s.String()
8597}
8598
8599// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketPolicyRequest
8600type DeleteBucketPolicyInput struct {
8601 _ struct{} `type:"structure"`
8602
8603 // Bucket is a required field
8604 Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
8605}
8606
8607// String returns the string representation
8608func (s DeleteBucketPolicyInput) String() string {
8609 return awsutil.Prettify(s)
8610}
8611
8612// GoString returns the string representation
8613func (s DeleteBucketPolicyInput) GoString() string {
8614 return s.String()
8615}
8616
8617// Validate inspects the fields of the type to determine if they are valid.
8618func (s *DeleteBucketPolicyInput) Validate() error {
8619 invalidParams := request.ErrInvalidParams{Context: "DeleteBucketPolicyInput"}
8620 if s.Bucket == nil {
8621 invalidParams.Add(request.NewErrParamRequired("Bucket"))
8622 }
8623
8624 if invalidParams.Len() > 0 {
8625 return invalidParams
8626 }
8627 return nil
8628}
8629
8630// SetBucket sets the Bucket field's value.
8631func (s *DeleteBucketPolicyInput) SetBucket(v string) *DeleteBucketPolicyInput {
8632 s.Bucket = &v
8633 return s
8634}
8635
8636// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketPolicyOutput
8637type DeleteBucketPolicyOutput struct {
8638 _ struct{} `type:"structure"`
8639}
8640
8641// String returns the string representation
8642func (s DeleteBucketPolicyOutput) String() string {
8643 return awsutil.Prettify(s)
8644}
8645
8646// GoString returns the string representation
8647func (s DeleteBucketPolicyOutput) GoString() string {
8648 return s.String()
8649}
8650
8651// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketReplicationRequest
8652type DeleteBucketReplicationInput struct {
8653 _ struct{} `type:"structure"`
8654
8655 // Bucket is a required field
8656 Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
8657}
8658
8659// String returns the string representation
8660func (s DeleteBucketReplicationInput) String() string {
8661 return awsutil.Prettify(s)
8662}
8663
8664// GoString returns the string representation
8665func (s DeleteBucketReplicationInput) GoString() string {
8666 return s.String()
8667}
8668
8669// Validate inspects the fields of the type to determine if they are valid.
8670func (s *DeleteBucketReplicationInput) Validate() error {
8671 invalidParams := request.ErrInvalidParams{Context: "DeleteBucketReplicationInput"}
8672 if s.Bucket == nil {
8673 invalidParams.Add(request.NewErrParamRequired("Bucket"))
8674 }
8675
8676 if invalidParams.Len() > 0 {
8677 return invalidParams
8678 }
8679 return nil
8680}
8681
8682// SetBucket sets the Bucket field's value.
8683func (s *DeleteBucketReplicationInput) SetBucket(v string) *DeleteBucketReplicationInput {
8684 s.Bucket = &v
8685 return s
8686}
8687
8688// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketReplicationOutput
8689type DeleteBucketReplicationOutput struct {
8690 _ struct{} `type:"structure"`
8691}
8692
8693// String returns the string representation
8694func (s DeleteBucketReplicationOutput) String() string {
8695 return awsutil.Prettify(s)
8696}
8697
8698// GoString returns the string representation
8699func (s DeleteBucketReplicationOutput) GoString() string {
8700 return s.String()
8701}
8702
8703// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketTaggingRequest
8704type DeleteBucketTaggingInput struct {
8705 _ struct{} `type:"structure"`
8706
8707 // Bucket is a required field
8708 Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
8709}
8710
8711// String returns the string representation
8712func (s DeleteBucketTaggingInput) String() string {
8713 return awsutil.Prettify(s)
8714}
8715
8716// GoString returns the string representation
8717func (s DeleteBucketTaggingInput) GoString() string {
8718 return s.String()
8719}
8720
8721// Validate inspects the fields of the type to determine if they are valid.
8722func (s *DeleteBucketTaggingInput) Validate() error {
8723 invalidParams := request.ErrInvalidParams{Context: "DeleteBucketTaggingInput"}
8724 if s.Bucket == nil {
8725 invalidParams.Add(request.NewErrParamRequired("Bucket"))
8726 }
8727
8728 if invalidParams.Len() > 0 {
8729 return invalidParams
8730 }
8731 return nil
8732}
8733
8734// SetBucket sets the Bucket field's value.
8735func (s *DeleteBucketTaggingInput) SetBucket(v string) *DeleteBucketTaggingInput {
8736 s.Bucket = &v
8737 return s
8738}
8739
8740// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketTaggingOutput
8741type DeleteBucketTaggingOutput struct {
8742 _ struct{} `type:"structure"`
8743}
8744
8745// String returns the string representation
8746func (s DeleteBucketTaggingOutput) String() string {
8747 return awsutil.Prettify(s)
8748}
8749
8750// GoString returns the string representation
8751func (s DeleteBucketTaggingOutput) GoString() string {
8752 return s.String()
8753}
8754
8755// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketWebsiteRequest
8756type DeleteBucketWebsiteInput struct {
8757 _ struct{} `type:"structure"`
8758
8759 // Bucket is a required field
8760 Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
8761}
8762
8763// String returns the string representation
8764func (s DeleteBucketWebsiteInput) String() string {
8765 return awsutil.Prettify(s)
8766}
8767
8768// GoString returns the string representation
8769func (s DeleteBucketWebsiteInput) GoString() string {
8770 return s.String()
8771}
8772
8773// Validate inspects the fields of the type to determine if they are valid.
8774func (s *DeleteBucketWebsiteInput) Validate() error {
8775 invalidParams := request.ErrInvalidParams{Context: "DeleteBucketWebsiteInput"}
8776 if s.Bucket == nil {
8777 invalidParams.Add(request.NewErrParamRequired("Bucket"))
8778 }
8779
8780 if invalidParams.Len() > 0 {
8781 return invalidParams
8782 }
8783 return nil
8784}
8785
8786// SetBucket sets the Bucket field's value.
8787func (s *DeleteBucketWebsiteInput) SetBucket(v string) *DeleteBucketWebsiteInput {
8788 s.Bucket = &v
8789 return s
8790}
8791
8792// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketWebsiteOutput
8793type DeleteBucketWebsiteOutput struct {
8794 _ struct{} `type:"structure"`
8795}
8796
8797// String returns the string representation
8798func (s DeleteBucketWebsiteOutput) String() string {
8799 return awsutil.Prettify(s)
8800}
8801
8802// GoString returns the string representation
8803func (s DeleteBucketWebsiteOutput) GoString() string {
8804 return s.String()
8805}
8806
8807// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteMarkerEntry
8808type DeleteMarkerEntry struct {
8809 _ struct{} `type:"structure"`
8810
8811	// Specifies whether the object is (true) or is not (false) the latest version
8812	// of the object.
8813 IsLatest *bool `type:"boolean"`
8814
8815 // The object key.
8816 Key *string `min:"1" type:"string"`
8817
8818 // Date and time the object was last modified.
8819 LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"`
8820
8821 Owner *Owner `type:"structure"`
8822
8823 // Version ID of an object.
8824 VersionId *string `type:"string"`
8825}
8826
8827// String returns the string representation
8828func (s DeleteMarkerEntry) String() string {
8829 return awsutil.Prettify(s)
8830}
8831
8832// GoString returns the string representation
8833func (s DeleteMarkerEntry) GoString() string {
8834 return s.String()
8835}
8836
8837// SetIsLatest sets the IsLatest field's value.
8838func (s *DeleteMarkerEntry) SetIsLatest(v bool) *DeleteMarkerEntry {
8839 s.IsLatest = &v
8840 return s
8841}
8842
8843// SetKey sets the Key field's value.
8844func (s *DeleteMarkerEntry) SetKey(v string) *DeleteMarkerEntry {
8845 s.Key = &v
8846 return s
8847}
8848
8849// SetLastModified sets the LastModified field's value.
8850func (s *DeleteMarkerEntry) SetLastModified(v time.Time) *DeleteMarkerEntry {
8851 s.LastModified = &v
8852 return s
8853}
8854
8855// SetOwner sets the Owner field's value.
8856func (s *DeleteMarkerEntry) SetOwner(v *Owner) *DeleteMarkerEntry {
8857 s.Owner = v
8858 return s
8859}
8860
8861// SetVersionId sets the VersionId field's value.
8862func (s *DeleteMarkerEntry) SetVersionId(v string) *DeleteMarkerEntry {
8863 s.VersionId = &v
8864 return s
8865}
8866
8867// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectRequest
8868type DeleteObjectInput struct {
8869 _ struct{} `type:"structure"`
8870
8871 // Bucket is a required field
8872 Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
8873
8874 // Key is a required field
8875 Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
8876
8877 // The concatenation of the authentication device's serial number, a space,
8878 // and the value that is displayed on your authentication device.
8879 MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"`
8880
8881	// Confirms that the requester knows that they will be charged for the request.
8882	// Bucket owners need not specify this parameter in their requests. For
8883	// documentation on downloading objects from Requester Pays buckets, see
8884	// http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
8885 RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
8886
8887 // VersionId used to reference a specific version of the object.
8888 VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
8889}
8890
8891// String returns the string representation
8892func (s DeleteObjectInput) String() string {
8893 return awsutil.Prettify(s)
8894}
8895
8896// GoString returns the string representation
8897func (s DeleteObjectInput) GoString() string {
8898 return s.String()
8899}
8900
8901// Validate inspects the fields of the type to determine if they are valid.
8902func (s *DeleteObjectInput) Validate() error {
8903 invalidParams := request.ErrInvalidParams{Context: "DeleteObjectInput"}
8904 if s.Bucket == nil {
8905 invalidParams.Add(request.NewErrParamRequired("Bucket"))
8906 }
8907 if s.Key == nil {
8908 invalidParams.Add(request.NewErrParamRequired("Key"))
8909 }
8910 if s.Key != nil && len(*s.Key) < 1 {
8911 invalidParams.Add(request.NewErrParamMinLen("Key", 1))
8912 }
8913
8914 if invalidParams.Len() > 0 {
8915 return invalidParams
8916 }
8917 return nil
8918}
8919
8920// SetBucket sets the Bucket field's value.
8921func (s *DeleteObjectInput) SetBucket(v string) *DeleteObjectInput {
8922 s.Bucket = &v
8923 return s
8924}
8925
8926// SetKey sets the Key field's value.
8927func (s *DeleteObjectInput) SetKey(v string) *DeleteObjectInput {
8928 s.Key = &v
8929 return s
8930}
8931
8932// SetMFA sets the MFA field's value.
8933func (s *DeleteObjectInput) SetMFA(v string) *DeleteObjectInput {
8934 s.MFA = &v
8935 return s
8936}
8937
8938// SetRequestPayer sets the RequestPayer field's value.
8939func (s *DeleteObjectInput) SetRequestPayer(v string) *DeleteObjectInput {
8940 s.RequestPayer = &v
8941 return s
8942}
8943
8944// SetVersionId sets the VersionId field's value.
8945func (s *DeleteObjectInput) SetVersionId(v string) *DeleteObjectInput {
8946 s.VersionId = &v
8947 return s
8948}
8949
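// Sketch with hypothetical values: deleting one version of an object from a
// versioned bucket with MFA delete enabled. The MFA string is the device
// serial number, a space, and the current token, as described above.
//
//    in := (&DeleteObjectInput{}).
//        SetBucket("example-bucket").
//        SetKey("example-key").
//        SetVersionId("example-version-id").
//        SetMFA("arn:aws:iam::123456789012:mfa/user 123456")
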
8950// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectOutput
8951type DeleteObjectOutput struct {
8952 _ struct{} `type:"structure"`
8953
8954 // Specifies whether the versioned object that was permanently deleted was (true)
8955 // or was not (false) a delete marker.
8956 DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"`
8957
8958 // If present, indicates that the requester was successfully charged for the
8959 // request.
8960 RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
8961
8962 // Returns the version ID of the delete marker created as a result of the DELETE
8963 // operation.
8964 VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"`
8965}
8966
8967// String returns the string representation
8968func (s DeleteObjectOutput) String() string {
8969 return awsutil.Prettify(s)
8970}
8971
8972// GoString returns the string representation
8973func (s DeleteObjectOutput) GoString() string {
8974 return s.String()
8975}
8976
8977// SetDeleteMarker sets the DeleteMarker field's value.
8978func (s *DeleteObjectOutput) SetDeleteMarker(v bool) *DeleteObjectOutput {
8979 s.DeleteMarker = &v
8980 return s
8981}
8982
8983// SetRequestCharged sets the RequestCharged field's value.
8984func (s *DeleteObjectOutput) SetRequestCharged(v string) *DeleteObjectOutput {
8985 s.RequestCharged = &v
8986 return s
8987}
8988
8989// SetVersionId sets the VersionId field's value.
8990func (s *DeleteObjectOutput) SetVersionId(v string) *DeleteObjectOutput {
8991 s.VersionId = &v
8992 return s
8993}
8994
8995// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectTaggingRequest
8996type DeleteObjectTaggingInput struct {
8997 _ struct{} `type:"structure"`
8998
8999 // Bucket is a required field
9000 Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
9001
9002 // Key is a required field
9003 Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
9004
9005 // The versionId of the object that the tag-set will be removed from.
9006 VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
9007}
9008
9009// String returns the string representation
9010func (s DeleteObjectTaggingInput) String() string {
9011 return awsutil.Prettify(s)
9012}
9013
9014// GoString returns the string representation
9015func (s DeleteObjectTaggingInput) GoString() string {
9016 return s.String()
9017}
9018
9019// Validate inspects the fields of the type to determine if they are valid.
9020func (s *DeleteObjectTaggingInput) Validate() error {
9021 invalidParams := request.ErrInvalidParams{Context: "DeleteObjectTaggingInput"}
9022 if s.Bucket == nil {
9023 invalidParams.Add(request.NewErrParamRequired("Bucket"))
9024 }
9025 if s.Key == nil {
9026 invalidParams.Add(request.NewErrParamRequired("Key"))
9027 }
9028 if s.Key != nil && len(*s.Key) < 1 {
9029 invalidParams.Add(request.NewErrParamMinLen("Key", 1))
9030 }
9031
9032 if invalidParams.Len() > 0 {
9033 return invalidParams
9034 }
9035 return nil
9036}
9037
9038// SetBucket sets the Bucket field's value.
9039func (s *DeleteObjectTaggingInput) SetBucket(v string) *DeleteObjectTaggingInput {
9040 s.Bucket = &v
9041 return s
9042}
9043
9044// SetKey sets the Key field's value.
9045func (s *DeleteObjectTaggingInput) SetKey(v string) *DeleteObjectTaggingInput {
9046 s.Key = &v
9047 return s
9048}
9049
9050// SetVersionId sets the VersionId field's value.
9051func (s *DeleteObjectTaggingInput) SetVersionId(v string) *DeleteObjectTaggingInput {
9052 s.VersionId = &v
9053 return s
9054}
9055
9056// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectTaggingOutput
9057type DeleteObjectTaggingOutput struct {
9058 _ struct{} `type:"structure"`
9059
9060 // The versionId of the object the tag-set was removed from.
9061 VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"`
9062}
9063
9064// String returns the string representation
9065func (s DeleteObjectTaggingOutput) String() string {
9066 return awsutil.Prettify(s)
9067}
9068
9069// GoString returns the string representation
9070func (s DeleteObjectTaggingOutput) GoString() string {
9071 return s.String()
9072}
9073
9074// SetVersionId sets the VersionId field's value.
9075func (s *DeleteObjectTaggingOutput) SetVersionId(v string) *DeleteObjectTaggingOutput {
9076 s.VersionId = &v
9077 return s
9078}
9079
9080// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectsRequest
9081type DeleteObjectsInput struct {
9082 _ struct{} `type:"structure" payload:"Delete"`
9083
9084 // Bucket is a required field
9085 Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
9086
9087 // Delete is a required field
9088 Delete *Delete `locationName:"Delete" type:"structure" required:"true"`
9089
9090 // The concatenation of the authentication device's serial number, a space,
9091 // and the value that is displayed on your authentication device.
9092 MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"`
9093
9094	// Confirms that the requester knows that they will be charged for the request.
9095	// Bucket owners need not specify this parameter in their requests. For
9096	// documentation on downloading objects from Requester Pays buckets, see
9097	// http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
9098 RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
9099}
9100
9101// String returns the string representation
9102func (s DeleteObjectsInput) String() string {
9103 return awsutil.Prettify(s)
9104}
9105
9106// GoString returns the string representation
9107func (s DeleteObjectsInput) GoString() string {
9108 return s.String()
9109}
9110
9111// Validate inspects the fields of the type to determine if they are valid.
9112func (s *DeleteObjectsInput) Validate() error {
9113 invalidParams := request.ErrInvalidParams{Context: "DeleteObjectsInput"}
9114 if s.Bucket == nil {
9115 invalidParams.Add(request.NewErrParamRequired("Bucket"))
9116 }
9117 if s.Delete == nil {
9118 invalidParams.Add(request.NewErrParamRequired("Delete"))
9119 }
9120 if s.Delete != nil {
9121 if err := s.Delete.Validate(); err != nil {
9122 invalidParams.AddNested("Delete", err.(request.ErrInvalidParams))
9123 }
9124 }
9125
9126 if invalidParams.Len() > 0 {
9127 return invalidParams
9128 }
9129 return nil
9130}
9131
9132// SetBucket sets the Bucket field's value.
9133func (s *DeleteObjectsInput) SetBucket(v string) *DeleteObjectsInput {
9134 s.Bucket = &v
9135 return s
9136}
9137
9138// SetDelete sets the Delete field's value.
9139func (s *DeleteObjectsInput) SetDelete(v *Delete) *DeleteObjectsInput {
9140 s.Delete = v
9141 return s
9142}
9143
9144// SetMFA sets the MFA field's value.
9145func (s *DeleteObjectsInput) SetMFA(v string) *DeleteObjectsInput {
9146 s.MFA = &v
9147 return s
9148}
9149
9150// SetRequestPayer sets the RequestPayer field's value.
9151func (s *DeleteObjectsInput) SetRequestPayer(v string) *DeleteObjectsInput {
9152 s.RequestPayer = &v
9153 return s
9154}
9155
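// Sketch: DeleteObjectsInput.Validate also walks the nested Delete payload, so
// a nil Objects field or an ObjectIdentifier with no Key is reported before
// any request is made. Names are hypothetical.
//
//    in := (&DeleteObjectsInput{}).
//        SetBucket("example-bucket").
//        SetDelete((&Delete{}).SetObjects([]*ObjectIdentifier{
//            (&ObjectIdentifier{}).SetKey("stale/report.csv"),
//        }))
//    if err := in.Validate(); err != nil {
//        // nested errors appear under the "Delete" context
//    }
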
9156// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectsOutput
9157type DeleteObjectsOutput struct {
9158 _ struct{} `type:"structure"`
9159
9160 Deleted []*DeletedObject `type:"list" flattened:"true"`
9161
9162 Errors []*Error `locationName:"Error" type:"list" flattened:"true"`
9163
9164 // If present, indicates that the requester was successfully charged for the
9165 // request.
9166 RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
9167}
9168
9169// String returns the string representation
9170func (s DeleteObjectsOutput) String() string {
9171 return awsutil.Prettify(s)
9172}
9173
9174// GoString returns the string representation
9175func (s DeleteObjectsOutput) GoString() string {
9176 return s.String()
9177}
9178
9179// SetDeleted sets the Deleted field's value.
9180func (s *DeleteObjectsOutput) SetDeleted(v []*DeletedObject) *DeleteObjectsOutput {
9181 s.Deleted = v
9182 return s
9183}
9184
9185// SetErrors sets the Errors field's value.
9186func (s *DeleteObjectsOutput) SetErrors(v []*Error) *DeleteObjectsOutput {
9187 s.Errors = v
9188 return s
9189}
9190
9191// SetRequestCharged sets the RequestCharged field's value.
9192func (s *DeleteObjectsOutput) SetRequestCharged(v string) *DeleteObjectsOutput {
9193 s.RequestCharged = &v
9194 return s
9195}
9196
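// Sketch: a multi-object delete can partially succeed, so callers should
// inspect both slices. `out` is a hypothetical *DeleteObjectsOutput.
//
//    for _, d := range out.Deleted {
//        _ = d // successfully deleted keys (and delete-marker info, if any)
//    }
//    for _, e := range out.Errors {
//        // *e.Key failed; *e.Code and *e.Message describe why
//    }
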
9197// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeletedObject
9198type DeletedObject struct {
9199 _ struct{} `type:"structure"`
9200
9201 DeleteMarker *bool `type:"boolean"`
9202
9203 DeleteMarkerVersionId *string `type:"string"`
9204
9205 Key *string `min:"1" type:"string"`
9206
9207 VersionId *string `type:"string"`
9208}
9209
9210// String returns the string representation
9211func (s DeletedObject) String() string {
9212 return awsutil.Prettify(s)
9213}
9214
9215// GoString returns the string representation
9216func (s DeletedObject) GoString() string {
9217 return s.String()
9218}
9219
9220// SetDeleteMarker sets the DeleteMarker field's value.
9221func (s *DeletedObject) SetDeleteMarker(v bool) *DeletedObject {
9222 s.DeleteMarker = &v
9223 return s
9224}
9225
9226// SetDeleteMarkerVersionId sets the DeleteMarkerVersionId field's value.
9227func (s *DeletedObject) SetDeleteMarkerVersionId(v string) *DeletedObject {
9228 s.DeleteMarkerVersionId = &v
9229 return s
9230}
9231
9232// SetKey sets the Key field's value.
9233func (s *DeletedObject) SetKey(v string) *DeletedObject {
9234 s.Key = &v
9235 return s
9236}
9237
9238// SetVersionId sets the VersionId field's value.
9239func (s *DeletedObject) SetVersionId(v string) *DeletedObject {
9240 s.VersionId = &v
9241 return s
9242}
9243
9244// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Destination
9245type Destination struct {
9246 _ struct{} `type:"structure"`
9247
9248	// Amazon Resource Name (ARN) of the bucket where you want Amazon S3 to store
9249	// replicas of the objects identified by the rule.
9250 //
9251 // Bucket is a required field
9252 Bucket *string `type:"string" required:"true"`
9253
9254 // The class of storage used to store the object.
9255 StorageClass *string `type:"string" enum:"StorageClass"`
9256}
9257
9258// String returns the string representation
9259func (s Destination) String() string {
9260 return awsutil.Prettify(s)
9261}
9262
9263// GoString returns the string representation
9264func (s Destination) GoString() string {
9265 return s.String()
9266}
9267
9268// Validate inspects the fields of the type to determine if they are valid.
9269func (s *Destination) Validate() error {
9270 invalidParams := request.ErrInvalidParams{Context: "Destination"}
9271 if s.Bucket == nil {
9272 invalidParams.Add(request.NewErrParamRequired("Bucket"))
9273 }
9274
9275 if invalidParams.Len() > 0 {
9276 return invalidParams
9277 }
9278 return nil
9279}
9280
9281// SetBucket sets the Bucket field's value.
9282func (s *Destination) SetBucket(v string) *Destination {
9283 s.Bucket = &v
9284 return s
9285}
9286
9287// SetStorageClass sets the StorageClass field's value.
9288func (s *Destination) SetStorageClass(v string) *Destination {
9289 s.StorageClass = &v
9290 return s
9291}
9292
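// Sketch with a hypothetical ARN: a replication Destination names the target
// bucket by ARN; StorageClass is optional.
//
//    dst := (&Destination{}).
//        SetBucket("arn:aws:s3:::example-replica-bucket").
//        SetStorageClass("STANDARD_IA")
//    if err := dst.Validate(); err != nil {
//        // Bucket is the only required field
//    }
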
9293// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Error
9294type Error struct {
9295 _ struct{} `type:"structure"`
9296
9297 Code *string `type:"string"`
9298
9299 Key *string `min:"1" type:"string"`
9300
9301 Message *string `type:"string"`
9302
9303 VersionId *string `type:"string"`
9304}
9305
9306// String returns the string representation
9307func (s Error) String() string {
9308 return awsutil.Prettify(s)
9309}
9310
9311// GoString returns the string representation
9312func (s Error) GoString() string {
9313 return s.String()
9314}
9315
9316// SetCode sets the Code field's value.
9317func (s *Error) SetCode(v string) *Error {
9318 s.Code = &v
9319 return s
9320}
9321
9322// SetKey sets the Key field's value.
9323func (s *Error) SetKey(v string) *Error {
9324 s.Key = &v
9325 return s
9326}
9327
9328// SetMessage sets the Message field's value.
9329func (s *Error) SetMessage(v string) *Error {
9330 s.Message = &v
9331 return s
9332}
9333
9334// SetVersionId sets the VersionId field's value.
9335func (s *Error) SetVersionId(v string) *Error {
9336 s.VersionId = &v
9337 return s
9338}
9339
9340// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ErrorDocument
9341type ErrorDocument struct {
9342 _ struct{} `type:"structure"`
9343
9344 // The object key name to use when a 4XX class error occurs.
9345 //
9346 // Key is a required field
9347 Key *string `min:"1" type:"string" required:"true"`
9348}
9349
9350// String returns the string representation
9351func (s ErrorDocument) String() string {
9352 return awsutil.Prettify(s)
9353}
9354
9355// GoString returns the string representation
9356func (s ErrorDocument) GoString() string {
9357 return s.String()
9358}
9359
9360// Validate inspects the fields of the type to determine if they are valid.
9361func (s *ErrorDocument) Validate() error {
9362 invalidParams := request.ErrInvalidParams{Context: "ErrorDocument"}
9363 if s.Key == nil {
9364 invalidParams.Add(request.NewErrParamRequired("Key"))
9365 }
9366 if s.Key != nil && len(*s.Key) < 1 {
9367 invalidParams.Add(request.NewErrParamMinLen("Key", 1))
9368 }
9369
9370 if invalidParams.Len() > 0 {
9371 return invalidParams
9372 }
9373 return nil
9374}
9375
9376// SetKey sets the Key field's value.
9377func (s *ErrorDocument) SetKey(v string) *ErrorDocument {
9378 s.Key = &v
9379 return s
9380}
9381
9382// Container for key value pair that defines the criteria for the filter rule.
9383// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/FilterRule
9384type FilterRule struct {
9385 _ struct{} `type:"structure"`
9386
9387	// Object key name prefix or suffix identifying one or more objects to which
9388	// the filtering rule applies. The maximum prefix length is 1,024 characters.
9389	// Overlapping prefixes and suffixes are not supported. For more information,
9390	// see Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html).
9391 Name *string `type:"string" enum:"FilterRuleName"`
9392
9393 Value *string `type:"string"`
9394}
9395
9396// String returns the string representation
9397func (s FilterRule) String() string {
9398 return awsutil.Prettify(s)
9399}
9400
9401// GoString returns the string representation
9402func (s FilterRule) GoString() string {
9403 return s.String()
9404}
9405
9406// SetName sets the Name field's value.
9407func (s *FilterRule) SetName(v string) *FilterRule {
9408 s.Name = &v
9409 return s
9410}
9411
9412// SetValue sets the Value field's value.
9413func (s *FilterRule) SetValue(v string) *FilterRule {
9414 s.Value = &v
9415 return s
9416}
9417
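// Sketch: a notification filter rule matching keys under a hypothetical
// "logs/" prefix. Name is constrained to the FilterRuleName enum, which is
// assumed here to include "prefix" (and "suffix").
//
//    rule := (&FilterRule{}).SetName("prefix").SetValue("logs/")
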
9418// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAccelerateConfigurationRequest
9419type GetBucketAccelerateConfigurationInput struct {
9420 _ struct{} `type:"structure"`
9421
9422 // Name of the bucket for which the accelerate configuration is retrieved.
9423 //
9424 // Bucket is a required field
9425 Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
9426}
9427
9428// String returns the string representation
9429func (s GetBucketAccelerateConfigurationInput) String() string {
9430 return awsutil.Prettify(s)
9431}
9432
9433// GoString returns the string representation
9434func (s GetBucketAccelerateConfigurationInput) GoString() string {
9435 return s.String()
9436}
9437
9438// Validate inspects the fields of the type to determine if they are valid.
9439func (s *GetBucketAccelerateConfigurationInput) Validate() error {
9440 invalidParams := request.ErrInvalidParams{Context: "GetBucketAccelerateConfigurationInput"}
9441 if s.Bucket == nil {
9442 invalidParams.Add(request.NewErrParamRequired("Bucket"))
9443 }
9444
9445 if invalidParams.Len() > 0 {
9446 return invalidParams
9447 }
9448 return nil
9449}
9450
9451// SetBucket sets the Bucket field's value.
9452func (s *GetBucketAccelerateConfigurationInput) SetBucket(v string) *GetBucketAccelerateConfigurationInput {
9453 s.Bucket = &v
9454 return s
9455}
9456
9457// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAccelerateConfigurationOutput
9458type GetBucketAccelerateConfigurationOutput struct {
9459 _ struct{} `type:"structure"`
9460
9461 // The accelerate configuration of the bucket.
9462 Status *string `type:"string" enum:"BucketAccelerateStatus"`
9463}
9464
9465// String returns the string representation
9466func (s GetBucketAccelerateConfigurationOutput) String() string {
9467 return awsutil.Prettify(s)
9468}
9469
9470// GoString returns the string representation
9471func (s GetBucketAccelerateConfigurationOutput) GoString() string {
9472 return s.String()
9473}
9474
9475// SetStatus sets the Status field's value.
9476func (s *GetBucketAccelerateConfigurationOutput) SetStatus(v string) *GetBucketAccelerateConfigurationOutput {
9477 s.Status = &v
9478 return s
9479}
9480
9481// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAclRequest
9482type GetBucketAclInput struct {
9483 _ struct{} `type:"structure"`
9484
9485 // Bucket is a required field
9486 Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
9487}
9488
9489// String returns the string representation
9490func (s GetBucketAclInput) String() string {
9491 return awsutil.Prettify(s)
9492}
9493
9494// GoString returns the string representation
9495func (s GetBucketAclInput) GoString() string {
9496 return s.String()
9497}
9498
9499// Validate inspects the fields of the type to determine if they are valid.
9500func (s *GetBucketAclInput) Validate() error {
9501 invalidParams := request.ErrInvalidParams{Context: "GetBucketAclInput"}
9502 if s.Bucket == nil {
9503 invalidParams.Add(request.NewErrParamRequired("Bucket"))
9504 }
9505
9506 if invalidParams.Len() > 0 {
9507 return invalidParams
9508 }
9509 return nil
9510}
9511
9512// SetBucket sets the Bucket field's value.
9513func (s *GetBucketAclInput) SetBucket(v string) *GetBucketAclInput {
9514 s.Bucket = &v
9515 return s
9516}
9517
9518// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAclOutput
9519type GetBucketAclOutput struct {
9520 _ struct{} `type:"structure"`
9521
9522 // A list of grants.
9523 Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"`
9524
9525 Owner *Owner `type:"structure"`
9526}
9527
9528// String returns the string representation
9529func (s GetBucketAclOutput) String() string {
9530 return awsutil.Prettify(s)
9531}
9532
9533// GoString returns the string representation
9534func (s GetBucketAclOutput) GoString() string {
9535 return s.String()
9536}
9537
9538// SetGrants sets the Grants field's value.
9539func (s *GetBucketAclOutput) SetGrants(v []*Grant) *GetBucketAclOutput {
9540 s.Grants = v
9541 return s
9542}
9543
9544// SetOwner sets the Owner field's value.
9545func (s *GetBucketAclOutput) SetOwner(v *Owner) *GetBucketAclOutput {
9546 s.Owner = v
9547 return s
9548}
9549
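// Sketch: walking the grants returned for a bucket ACL. `out` is a
// hypothetical *GetBucketAclOutput; Grant fields are pointers and are assumed
// here to include Grantee and Permission.
//
//    for _, g := range out.Grants {
//        if g.Grantee != nil && g.Permission != nil {
//            // inspect *g.Permission and the grantee's identity
//        }
//    }
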
9550// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAnalyticsConfigurationRequest
9551type GetBucketAnalyticsConfigurationInput struct {
9552 _ struct{} `type:"structure"`
9553
9554 // The name of the bucket from which an analytics configuration is retrieved.
9555 //
9556 // Bucket is a required field
9557 Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
9558
9559 // The identifier used to represent an analytics configuration.
9560 //
9561 // Id is a required field
9562 Id *string `location:"querystring" locationName:"id" type:"string" required:"true"`
9563}
9564
9565// String returns the string representation
9566func (s GetBucketAnalyticsConfigurationInput) String() string {
9567 return awsutil.Prettify(s)
9568}
9569
9570// GoString returns the string representation
9571func (s GetBucketAnalyticsConfigurationInput) GoString() string {
9572 return s.String()
9573}
9574
9575// Validate inspects the fields of the type to determine if they are valid.
9576func (s *GetBucketAnalyticsConfigurationInput) Validate() error {
9577 invalidParams := request.ErrInvalidParams{Context: "GetBucketAnalyticsConfigurationInput"}
9578 if s.Bucket == nil {
9579 invalidParams.Add(request.NewErrParamRequired("Bucket"))
9580 }
9581 if s.Id == nil {
9582 invalidParams.Add(request.NewErrParamRequired("Id"))
9583 }
9584
9585 if invalidParams.Len() > 0 {
9586 return invalidParams
9587 }
9588 return nil
9589}
9590
9591// SetBucket sets the Bucket field's value.
9592func (s *GetBucketAnalyticsConfigurationInput) SetBucket(v string) *GetBucketAnalyticsConfigurationInput {
9593 s.Bucket = &v
9594 return s
9595}
9596
9597// SetId sets the Id field's value.
9598func (s *GetBucketAnalyticsConfigurationInput) SetId(v string) *GetBucketAnalyticsConfigurationInput {
9599 s.Id = &v
9600 return s
9601}
9602
9603// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAnalyticsConfigurationOutput
9604type GetBucketAnalyticsConfigurationOutput struct {
9605 _ struct{} `type:"structure" payload:"AnalyticsConfiguration"`
9606
9607 // The configuration and any analyses for the analytics filter.
9608 AnalyticsConfiguration *AnalyticsConfiguration `type:"structure"`
9609}
9610
9611// String returns the string representation
9612func (s GetBucketAnalyticsConfigurationOutput) String() string {
9613 return awsutil.Prettify(s)
9614}
9615
9616// GoString returns the string representation
9617func (s GetBucketAnalyticsConfigurationOutput) GoString() string {
9618 return s.String()
9619}
9620
9621// SetAnalyticsConfiguration sets the AnalyticsConfiguration field's value.
9622func (s *GetBucketAnalyticsConfigurationOutput) SetAnalyticsConfiguration(v *AnalyticsConfiguration) *GetBucketAnalyticsConfigurationOutput {
9623 s.AnalyticsConfiguration = v
9624 return s
9625}
9626
9627// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketCorsRequest
9628type GetBucketCorsInput struct {
9629 _ struct{} `type:"structure"`
9630
9631 // Bucket is a required field
9632 Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
9633}
9634
9635// String returns the string representation
9636func (s GetBucketCorsInput) String() string {
9637 return awsutil.Prettify(s)
9638}
9639
9640// GoString returns the string representation
9641func (s GetBucketCorsInput) GoString() string {
9642 return s.String()
9643}
9644
9645// Validate inspects the fields of the type to determine if they are valid.
9646func (s *GetBucketCorsInput) Validate() error {
9647 invalidParams := request.ErrInvalidParams{Context: "GetBucketCorsInput"}
9648 if s.Bucket == nil {
9649 invalidParams.Add(request.NewErrParamRequired("Bucket"))
9650 }
9651
9652 if invalidParams.Len() > 0 {
9653 return invalidParams
9654 }
9655 return nil
9656}
9657
9658// SetBucket sets the Bucket field's value.
9659func (s *GetBucketCorsInput) SetBucket(v string) *GetBucketCorsInput {
9660 s.Bucket = &v
9661 return s
9662}
9663
9664// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketCorsOutput
9665type GetBucketCorsOutput struct {
9666 _ struct{} `type:"structure"`
9667
9668 CORSRules []*CORSRule `locationName:"CORSRule" type:"list" flattened:"true"`
9669}
9670
9671// String returns the string representation
9672func (s GetBucketCorsOutput) String() string {
9673 return awsutil.Prettify(s)
9674}
9675
9676// GoString returns the string representation
9677func (s GetBucketCorsOutput) GoString() string {
9678 return s.String()
9679}
9680
9681// SetCORSRules sets the CORSRules field's value.
9682func (s *GetBucketCorsOutput) SetCORSRules(v []*CORSRule) *GetBucketCorsOutput {
9683 s.CORSRules = v
9684 return s
9685}
9686
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketInventoryConfigurationRequest
type GetBucketInventoryConfigurationInput struct {
	_ struct{} `type:"structure"`

	// The name of the bucket containing the inventory configuration to retrieve.
	//
	// Bucket is a required field
	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`

	// The ID used to identify the inventory configuration.
	//
	// Id is a required field
	Id *string `location:"querystring" locationName:"id" type:"string" required:"true"`
}

// String returns the string representation
func (s GetBucketInventoryConfigurationInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s GetBucketInventoryConfigurationInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *GetBucketInventoryConfigurationInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "GetBucketInventoryConfigurationInput"}
	if s.Bucket == nil {
		invalidParams.Add(request.NewErrParamRequired("Bucket"))
	}
	if s.Id == nil {
		invalidParams.Add(request.NewErrParamRequired("Id"))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetBucket sets the Bucket field's value.
func (s *GetBucketInventoryConfigurationInput) SetBucket(v string) *GetBucketInventoryConfigurationInput {
	s.Bucket = &v
	return s
}

// SetId sets the Id field's value.
func (s *GetBucketInventoryConfigurationInput) SetId(v string) *GetBucketInventoryConfigurationInput {
	s.Id = &v
	return s
}

// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketInventoryConfigurationOutput
type GetBucketInventoryConfigurationOutput struct {
	_ struct{} `type:"structure" payload:"InventoryConfiguration"`

	// Specifies the inventory configuration.
	InventoryConfiguration *InventoryConfiguration `type:"structure"`
}

// String returns the string representation
func (s GetBucketInventoryConfigurationOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s GetBucketInventoryConfigurationOutput) GoString() string {
	return s.String()
}

// SetInventoryConfiguration sets the InventoryConfiguration field's value.
func (s *GetBucketInventoryConfigurationOutput) SetInventoryConfiguration(v *InventoryConfiguration) *GetBucketInventoryConfigurationOutput {
	s.InventoryConfiguration = v
	return s
}

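// Usage sketch (illustrative, not part of the generated API): retrieving a
// single inventory configuration by ID, assuming a client named svc;
// "report-1" is a placeholder configuration ID.
//
//	out, err := svc.GetBucketInventoryConfiguration(&s3.GetBucketInventoryConfigurationInput{
//		Bucket: aws.String("my-bucket"),
//		Id:     aws.String("report-1"),
//	})
//	if err == nil && out.InventoryConfiguration != nil {
//		fmt.Println(aws.StringValue(out.InventoryConfiguration.Id))
//	}
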
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfigurationRequest
type GetBucketLifecycleConfigurationInput struct {
	_ struct{} `type:"structure"`

	// Bucket is a required field
	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
}

// String returns the string representation
func (s GetBucketLifecycleConfigurationInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s GetBucketLifecycleConfigurationInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *GetBucketLifecycleConfigurationInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "GetBucketLifecycleConfigurationInput"}
	if s.Bucket == nil {
		invalidParams.Add(request.NewErrParamRequired("Bucket"))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetBucket sets the Bucket field's value.
func (s *GetBucketLifecycleConfigurationInput) SetBucket(v string) *GetBucketLifecycleConfigurationInput {
	s.Bucket = &v
	return s
}

// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfigurationOutput
type GetBucketLifecycleConfigurationOutput struct {
	_ struct{} `type:"structure"`

	Rules []*LifecycleRule `locationName:"Rule" type:"list" flattened:"true"`
}

// String returns the string representation
func (s GetBucketLifecycleConfigurationOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s GetBucketLifecycleConfigurationOutput) GoString() string {
	return s.String()
}

// SetRules sets the Rules field's value.
func (s *GetBucketLifecycleConfigurationOutput) SetRules(v []*LifecycleRule) *GetBucketLifecycleConfigurationOutput {
	s.Rules = v
	return s
}

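// Usage sketch (illustrative, not part of the generated API): listing
// lifecycle rules, assuming a client named svc. Each LifecycleRule carries a
// status plus optional expiration/transition blocks.
//
//	out, err := svc.GetBucketLifecycleConfiguration(&s3.GetBucketLifecycleConfigurationInput{
//		Bucket: aws.String("my-bucket"),
//	})
//	if err != nil {
//		log.Fatal(err)
//	}
//	for _, r := range out.Rules {
//		fmt.Println(aws.StringValue(r.ID), aws.StringValue(r.Status))
//	}
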
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleRequest
type GetBucketLifecycleInput struct {
	_ struct{} `type:"structure"`

	// Bucket is a required field
	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
}

// String returns the string representation
func (s GetBucketLifecycleInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s GetBucketLifecycleInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *GetBucketLifecycleInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "GetBucketLifecycleInput"}
	if s.Bucket == nil {
		invalidParams.Add(request.NewErrParamRequired("Bucket"))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetBucket sets the Bucket field's value.
func (s *GetBucketLifecycleInput) SetBucket(v string) *GetBucketLifecycleInput {
	s.Bucket = &v
	return s
}

// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleOutput
type GetBucketLifecycleOutput struct {
	_ struct{} `type:"structure"`

	Rules []*Rule `locationName:"Rule" type:"list" flattened:"true"`
}

// String returns the string representation
func (s GetBucketLifecycleOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s GetBucketLifecycleOutput) GoString() string {
	return s.String()
}

// SetRules sets the Rules field's value.
func (s *GetBucketLifecycleOutput) SetRules(v []*Rule) *GetBucketLifecycleOutput {
	s.Rules = v
	return s
}

// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocationRequest
type GetBucketLocationInput struct {
	_ struct{} `type:"structure"`

	// Bucket is a required field
	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
}

// String returns the string representation
func (s GetBucketLocationInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s GetBucketLocationInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *GetBucketLocationInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "GetBucketLocationInput"}
	if s.Bucket == nil {
		invalidParams.Add(request.NewErrParamRequired("Bucket"))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetBucket sets the Bucket field's value.
func (s *GetBucketLocationInput) SetBucket(v string) *GetBucketLocationInput {
	s.Bucket = &v
	return s
}

// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocationOutput
type GetBucketLocationOutput struct {
	_ struct{} `type:"structure"`

	LocationConstraint *string `type:"string" enum:"BucketLocationConstraint"`
}

// String returns the string representation
func (s GetBucketLocationOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s GetBucketLocationOutput) GoString() string {
	return s.String()
}

// SetLocationConstraint sets the LocationConstraint field's value.
func (s *GetBucketLocationOutput) SetLocationConstraint(v string) *GetBucketLocationOutput {
	s.LocationConstraint = &v
	return s
}

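// Usage sketch (illustrative, not part of the generated API): resolving a
// bucket's region, assuming a client named svc. S3 returns an empty
// LocationConstraint for buckets in us-east-1, so callers conventionally map
// the empty string to "us-east-1" themselves.
//
//	out, err := svc.GetBucketLocation(&s3.GetBucketLocationInput{
//		Bucket: aws.String("my-bucket"),
//	})
//	if err != nil {
//		log.Fatal(err)
//	}
//	region := aws.StringValue(out.LocationConstraint)
//	if region == "" {
//		region = "us-east-1"
//	}
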
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLoggingRequest
type GetBucketLoggingInput struct {
	_ struct{} `type:"structure"`

	// Bucket is a required field
	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
}

// String returns the string representation
func (s GetBucketLoggingInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s GetBucketLoggingInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *GetBucketLoggingInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "GetBucketLoggingInput"}
	if s.Bucket == nil {
		invalidParams.Add(request.NewErrParamRequired("Bucket"))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetBucket sets the Bucket field's value.
func (s *GetBucketLoggingInput) SetBucket(v string) *GetBucketLoggingInput {
	s.Bucket = &v
	return s
}

// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLoggingOutput
type GetBucketLoggingOutput struct {
	_ struct{} `type:"structure"`

	LoggingEnabled *LoggingEnabled `type:"structure"`
}

// String returns the string representation
func (s GetBucketLoggingOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s GetBucketLoggingOutput) GoString() string {
	return s.String()
}

// SetLoggingEnabled sets the LoggingEnabled field's value.
func (s *GetBucketLoggingOutput) SetLoggingEnabled(v *LoggingEnabled) *GetBucketLoggingOutput {
	s.LoggingEnabled = v
	return s
}

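// Usage sketch (illustrative, not part of the generated API): LoggingEnabled
// is nil when access logging is off, so check before dereferencing. Assumes a
// client named svc.
//
//	out, err := svc.GetBucketLogging(&s3.GetBucketLoggingInput{Bucket: aws.String("my-bucket")})
//	if err == nil && out.LoggingEnabled != nil {
//		fmt.Println("logs go to", aws.StringValue(out.LoggingEnabled.TargetBucket))
//	}
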
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketMetricsConfigurationRequest
type GetBucketMetricsConfigurationInput struct {
	_ struct{} `type:"structure"`

	// The name of the bucket containing the metrics configuration to retrieve.
	//
	// Bucket is a required field
	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`

	// The ID used to identify the metrics configuration.
	//
	// Id is a required field
	Id *string `location:"querystring" locationName:"id" type:"string" required:"true"`
}

// String returns the string representation
func (s GetBucketMetricsConfigurationInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s GetBucketMetricsConfigurationInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *GetBucketMetricsConfigurationInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "GetBucketMetricsConfigurationInput"}
	if s.Bucket == nil {
		invalidParams.Add(request.NewErrParamRequired("Bucket"))
	}
	if s.Id == nil {
		invalidParams.Add(request.NewErrParamRequired("Id"))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetBucket sets the Bucket field's value.
func (s *GetBucketMetricsConfigurationInput) SetBucket(v string) *GetBucketMetricsConfigurationInput {
	s.Bucket = &v
	return s
}

// SetId sets the Id field's value.
func (s *GetBucketMetricsConfigurationInput) SetId(v string) *GetBucketMetricsConfigurationInput {
	s.Id = &v
	return s
}

// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketMetricsConfigurationOutput
type GetBucketMetricsConfigurationOutput struct {
	_ struct{} `type:"structure" payload:"MetricsConfiguration"`

	// Specifies the metrics configuration.
	MetricsConfiguration *MetricsConfiguration `type:"structure"`
}

// String returns the string representation
func (s GetBucketMetricsConfigurationOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s GetBucketMetricsConfigurationOutput) GoString() string {
	return s.String()
}

// SetMetricsConfiguration sets the MetricsConfiguration field's value.
func (s *GetBucketMetricsConfigurationOutput) SetMetricsConfiguration(v *MetricsConfiguration) *GetBucketMetricsConfigurationOutput {
	s.MetricsConfiguration = v
	return s
}

// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotificationConfigurationRequest
type GetBucketNotificationConfigurationRequest struct {
	_ struct{} `type:"structure"`

	// Name of the bucket to get the notification configuration for.
	//
	// Bucket is a required field
	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
}

// String returns the string representation
func (s GetBucketNotificationConfigurationRequest) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s GetBucketNotificationConfigurationRequest) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *GetBucketNotificationConfigurationRequest) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "GetBucketNotificationConfigurationRequest"}
	if s.Bucket == nil {
		invalidParams.Add(request.NewErrParamRequired("Bucket"))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetBucket sets the Bucket field's value.
func (s *GetBucketNotificationConfigurationRequest) SetBucket(v string) *GetBucketNotificationConfigurationRequest {
	s.Bucket = &v
	return s
}

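// Usage sketch (illustrative, not part of the generated API): unlike most Get*
// operations, this one has no dedicated output shape; the
// GetBucketNotificationConfiguration method returns a *NotificationConfiguration
// directly. Assumes a client named svc.
//
//	cfg, err := svc.GetBucketNotificationConfiguration(&s3.GetBucketNotificationConfigurationRequest{
//		Bucket: aws.String("my-bucket"),
//	})
//	if err == nil {
//		fmt.Println(len(cfg.TopicConfigurations), "SNS topic configurations")
//	}
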
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicyRequest
type GetBucketPolicyInput struct {
	_ struct{} `type:"structure"`

	// Bucket is a required field
	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
}

// String returns the string representation
func (s GetBucketPolicyInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s GetBucketPolicyInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *GetBucketPolicyInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "GetBucketPolicyInput"}
	if s.Bucket == nil {
		invalidParams.Add(request.NewErrParamRequired("Bucket"))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetBucket sets the Bucket field's value.
func (s *GetBucketPolicyInput) SetBucket(v string) *GetBucketPolicyInput {
	s.Bucket = &v
	return s
}

// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicyOutput
type GetBucketPolicyOutput struct {
	_ struct{} `type:"structure" payload:"Policy"`

	// The bucket policy as a JSON document.
	Policy *string `type:"string"`
}

// String returns the string representation
func (s GetBucketPolicyOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s GetBucketPolicyOutput) GoString() string {
	return s.String()
}

// SetPolicy sets the Policy field's value.
func (s *GetBucketPolicyOutput) SetPolicy(v string) *GetBucketPolicyOutput {
	s.Policy = &v
	return s
}

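// Usage sketch (illustrative, not part of the generated API): the policy
// arrives as a JSON string, so it can be decoded with encoding/json for
// inspection. Assumes a client named svc.
//
//	out, err := svc.GetBucketPolicy(&s3.GetBucketPolicyInput{Bucket: aws.String("my-bucket")})
//	if err != nil {
//		log.Fatal(err)
//	}
//	var doc map[string]interface{}
//	if err := json.Unmarshal([]byte(aws.StringValue(out.Policy)), &doc); err == nil {
//		fmt.Println(doc["Version"])
//	}
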
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketReplicationRequest
type GetBucketReplicationInput struct {
	_ struct{} `type:"structure"`

	// Bucket is a required field
	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
}

// String returns the string representation
func (s GetBucketReplicationInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s GetBucketReplicationInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *GetBucketReplicationInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "GetBucketReplicationInput"}
	if s.Bucket == nil {
		invalidParams.Add(request.NewErrParamRequired("Bucket"))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetBucket sets the Bucket field's value.
func (s *GetBucketReplicationInput) SetBucket(v string) *GetBucketReplicationInput {
	s.Bucket = &v
	return s
}

// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketReplicationOutput
type GetBucketReplicationOutput struct {
	_ struct{} `type:"structure" payload:"ReplicationConfiguration"`

	// Container for replication rules. You can add as many as 1,000 rules. Total
	// replication configuration size can be up to 2 MB.
	ReplicationConfiguration *ReplicationConfiguration `type:"structure"`
}

// String returns the string representation
func (s GetBucketReplicationOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s GetBucketReplicationOutput) GoString() string {
	return s.String()
}

// SetReplicationConfiguration sets the ReplicationConfiguration field's value.
func (s *GetBucketReplicationOutput) SetReplicationConfiguration(v *ReplicationConfiguration) *GetBucketReplicationOutput {
	s.ReplicationConfiguration = v
	return s
}

// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketRequestPaymentRequest
type GetBucketRequestPaymentInput struct {
	_ struct{} `type:"structure"`

	// Bucket is a required field
	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
}

// String returns the string representation
func (s GetBucketRequestPaymentInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s GetBucketRequestPaymentInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *GetBucketRequestPaymentInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "GetBucketRequestPaymentInput"}
	if s.Bucket == nil {
		invalidParams.Add(request.NewErrParamRequired("Bucket"))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetBucket sets the Bucket field's value.
func (s *GetBucketRequestPaymentInput) SetBucket(v string) *GetBucketRequestPaymentInput {
	s.Bucket = &v
	return s
}

// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketRequestPaymentOutput
type GetBucketRequestPaymentOutput struct {
	_ struct{} `type:"structure"`

	// Specifies who pays for the download and request fees.
	Payer *string `type:"string" enum:"Payer"`
}

// String returns the string representation
func (s GetBucketRequestPaymentOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s GetBucketRequestPaymentOutput) GoString() string {
	return s.String()
}

// SetPayer sets the Payer field's value.
func (s *GetBucketRequestPaymentOutput) SetPayer(v string) *GetBucketRequestPaymentOutput {
	s.Payer = &v
	return s
}

// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketTaggingRequest
type GetBucketTaggingInput struct {
	_ struct{} `type:"structure"`

	// Bucket is a required field
	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
}

// String returns the string representation
func (s GetBucketTaggingInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s GetBucketTaggingInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *GetBucketTaggingInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "GetBucketTaggingInput"}
	if s.Bucket == nil {
		invalidParams.Add(request.NewErrParamRequired("Bucket"))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetBucket sets the Bucket field's value.
func (s *GetBucketTaggingInput) SetBucket(v string) *GetBucketTaggingInput {
	s.Bucket = &v
	return s
}

// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketTaggingOutput
type GetBucketTaggingOutput struct {
	_ struct{} `type:"structure"`

	// TagSet is a required field
	TagSet []*Tag `locationNameList:"Tag" type:"list" required:"true"`
}

// String returns the string representation
func (s GetBucketTaggingOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s GetBucketTaggingOutput) GoString() string {
	return s.String()
}

// SetTagSet sets the TagSet field's value.
func (s *GetBucketTaggingOutput) SetTagSet(v []*Tag) *GetBucketTaggingOutput {
	s.TagSet = v
	return s
}

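// Usage sketch (illustrative, not part of the generated API): flattening the
// returned TagSet into a plain map. Assumes a client named svc.
//
//	out, err := svc.GetBucketTagging(&s3.GetBucketTaggingInput{Bucket: aws.String("my-bucket")})
//	if err != nil {
//		log.Fatal(err)
//	}
//	tags := make(map[string]string, len(out.TagSet))
//	for _, t := range out.TagSet {
//		tags[aws.StringValue(t.Key)] = aws.StringValue(t.Value)
//	}
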
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketVersioningRequest
type GetBucketVersioningInput struct {
	_ struct{} `type:"structure"`

	// Bucket is a required field
	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
}

// String returns the string representation
func (s GetBucketVersioningInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s GetBucketVersioningInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *GetBucketVersioningInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "GetBucketVersioningInput"}
	if s.Bucket == nil {
		invalidParams.Add(request.NewErrParamRequired("Bucket"))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetBucket sets the Bucket field's value.
func (s *GetBucketVersioningInput) SetBucket(v string) *GetBucketVersioningInput {
	s.Bucket = &v
	return s
}

// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketVersioningOutput
type GetBucketVersioningOutput struct {
	_ struct{} `type:"structure"`

	// Specifies whether MFA delete is enabled in the bucket versioning configuration.
	// This element is only returned if the bucket has been configured with MFA
	// delete. If the bucket has never been so configured, this element is not returned.
	MFADelete *string `locationName:"MfaDelete" type:"string" enum:"MFADeleteStatus"`

	// The versioning state of the bucket.
	Status *string `type:"string" enum:"BucketVersioningStatus"`
}

// String returns the string representation
func (s GetBucketVersioningOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s GetBucketVersioningOutput) GoString() string {
	return s.String()
}

// SetMFADelete sets the MFADelete field's value.
func (s *GetBucketVersioningOutput) SetMFADelete(v string) *GetBucketVersioningOutput {
	s.MFADelete = &v
	return s
}

// SetStatus sets the Status field's value.
func (s *GetBucketVersioningOutput) SetStatus(v string) *GetBucketVersioningOutput {
	s.Status = &v
	return s
}

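// Usage sketch (illustrative, not part of the generated API): Status is unset
// for buckets that have never had versioning configured; otherwise compare it
// against the BucketVersioningStatus* constants. Assumes a client named svc.
//
//	out, err := svc.GetBucketVersioning(&s3.GetBucketVersioningInput{Bucket: aws.String("my-bucket")})
//	if err == nil {
//		versioned := aws.StringValue(out.Status) == s3.BucketVersioningStatusEnabled
//		fmt.Println("versioning enabled:", versioned)
//	}
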
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketWebsiteRequest
type GetBucketWebsiteInput struct {
	_ struct{} `type:"structure"`

	// Bucket is a required field
	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
}

// String returns the string representation
func (s GetBucketWebsiteInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s GetBucketWebsiteInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *GetBucketWebsiteInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "GetBucketWebsiteInput"}
	if s.Bucket == nil {
		invalidParams.Add(request.NewErrParamRequired("Bucket"))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetBucket sets the Bucket field's value.
func (s *GetBucketWebsiteInput) SetBucket(v string) *GetBucketWebsiteInput {
	s.Bucket = &v
	return s
}

// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketWebsiteOutput
type GetBucketWebsiteOutput struct {
	_ struct{} `type:"structure"`

	ErrorDocument *ErrorDocument `type:"structure"`

	IndexDocument *IndexDocument `type:"structure"`

	RedirectAllRequestsTo *RedirectAllRequestsTo `type:"structure"`

	RoutingRules []*RoutingRule `locationNameList:"RoutingRule" type:"list"`
}

// String returns the string representation
func (s GetBucketWebsiteOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s GetBucketWebsiteOutput) GoString() string {
	return s.String()
}

// SetErrorDocument sets the ErrorDocument field's value.
func (s *GetBucketWebsiteOutput) SetErrorDocument(v *ErrorDocument) *GetBucketWebsiteOutput {
	s.ErrorDocument = v
	return s
}

// SetIndexDocument sets the IndexDocument field's value.
func (s *GetBucketWebsiteOutput) SetIndexDocument(v *IndexDocument) *GetBucketWebsiteOutput {
	s.IndexDocument = v
	return s
}

// SetRedirectAllRequestsTo sets the RedirectAllRequestsTo field's value.
func (s *GetBucketWebsiteOutput) SetRedirectAllRequestsTo(v *RedirectAllRequestsTo) *GetBucketWebsiteOutput {
	s.RedirectAllRequestsTo = v
	return s
}

// SetRoutingRules sets the RoutingRules field's value.
func (s *GetBucketWebsiteOutput) SetRoutingRules(v []*RoutingRule) *GetBucketWebsiteOutput {
	s.RoutingRules = v
	return s
}

// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAclRequest
type GetObjectAclInput struct {
	_ struct{} `type:"structure"`

	// Bucket is a required field
	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`

	// Key is a required field
	Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`

	// Confirms that the requester knows that they will be charged for the request.
	// Bucket owners need not specify this parameter in their requests. Documentation
	// on downloading objects from requester pays buckets can be found at
	// http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
	RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`

	// VersionId used to reference a specific version of the object.
	VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
}

// String returns the string representation
func (s GetObjectAclInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s GetObjectAclInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *GetObjectAclInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "GetObjectAclInput"}
	if s.Bucket == nil {
		invalidParams.Add(request.NewErrParamRequired("Bucket"))
	}
	if s.Key == nil {
		invalidParams.Add(request.NewErrParamRequired("Key"))
	}
	if s.Key != nil && len(*s.Key) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("Key", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetBucket sets the Bucket field's value.
func (s *GetObjectAclInput) SetBucket(v string) *GetObjectAclInput {
	s.Bucket = &v
	return s
}

// SetKey sets the Key field's value.
func (s *GetObjectAclInput) SetKey(v string) *GetObjectAclInput {
	s.Key = &v
	return s
}

// SetRequestPayer sets the RequestPayer field's value.
func (s *GetObjectAclInput) SetRequestPayer(v string) *GetObjectAclInput {
	s.RequestPayer = &v
	return s
}

// SetVersionId sets the VersionId field's value.
func (s *GetObjectAclInput) SetVersionId(v string) *GetObjectAclInput {
	s.VersionId = &v
	return s
}

// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAclOutput
type GetObjectAclOutput struct {
	_ struct{} `type:"structure"`

	// A list of grants.
	Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"`

	Owner *Owner `type:"structure"`

	// If present, indicates that the requester was successfully charged for the
	// request.
	RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
}

// String returns the string representation
func (s GetObjectAclOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s GetObjectAclOutput) GoString() string {
	return s.String()
}

// SetGrants sets the Grants field's value.
func (s *GetObjectAclOutput) SetGrants(v []*Grant) *GetObjectAclOutput {
	s.Grants = v
	return s
}

// SetOwner sets the Owner field's value.
func (s *GetObjectAclOutput) SetOwner(v *Owner) *GetObjectAclOutput {
	s.Owner = v
	return s
}

// SetRequestCharged sets the RequestCharged field's value.
func (s *GetObjectAclOutput) SetRequestCharged(v string) *GetObjectAclOutput {
	s.RequestCharged = &v
	return s
}

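// Usage sketch (illustrative, not part of the generated API): walking the
// grants on an object's ACL. Assumes a client named svc; bucket and key names
// are placeholders.
//
//	out, err := svc.GetObjectAcl(&s3.GetObjectAclInput{
//		Bucket: aws.String("my-bucket"),
//		Key:    aws.String("my-key"),
//	})
//	if err != nil {
//		log.Fatal(err)
//	}
//	for _, g := range out.Grants {
//		if g.Grantee != nil {
//			fmt.Println(aws.StringValue(g.Grantee.Type), aws.StringValue(g.Permission))
//		}
//	}
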
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectRequest
type GetObjectInput struct {
	_ struct{} `type:"structure"`

	// Bucket is a required field
	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`

	// Return the object only if its entity tag (ETag) is the same as the one specified,
	// otherwise return a 412 (precondition failed).
	IfMatch *string `location:"header" locationName:"If-Match" type:"string"`

	// Return the object only if it has been modified since the specified time,
	// otherwise return a 304 (not modified).
	IfModifiedSince *time.Time `location:"header" locationName:"If-Modified-Since" type:"timestamp" timestampFormat:"rfc822"`

	// Return the object only if its entity tag (ETag) is different from the one
	// specified, otherwise return a 304 (not modified).
	IfNoneMatch *string `location:"header" locationName:"If-None-Match" type:"string"`

	// Return the object only if it has not been modified since the specified time,
	// otherwise return a 412 (precondition failed).
	IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp" timestampFormat:"rfc822"`

	// Key is a required field
	Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`

	// Part number of the object being read. This is a positive integer between
	// 1 and 10,000. Effectively performs a 'ranged' GET request for the part specified.
	// Useful for downloading just a part of an object.
	PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer"`

	// Downloads the specified byte range of an object. For more information about
	// the HTTP Range header, go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35.
	Range *string `location:"header" locationName:"Range" type:"string"`

	// Confirms that the requester knows that they will be charged for the request.
	// Bucket owners need not specify this parameter in their requests. Documentation
	// on downloading objects from requester pays buckets can be found at
	// http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
	RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`

	// Sets the Cache-Control header of the response.
	ResponseCacheControl *string `location:"querystring" locationName:"response-cache-control" type:"string"`

	// Sets the Content-Disposition header of the response.
	ResponseContentDisposition *string `location:"querystring" locationName:"response-content-disposition" type:"string"`

	// Sets the Content-Encoding header of the response.
	ResponseContentEncoding *string `location:"querystring" locationName:"response-content-encoding" type:"string"`

	// Sets the Content-Language header of the response.
	ResponseContentLanguage *string `location:"querystring" locationName:"response-content-language" type:"string"`

	// Sets the Content-Type header of the response.
	ResponseContentType *string `location:"querystring" locationName:"response-content-type" type:"string"`

	// Sets the Expires header of the response.
	ResponseExpires *time.Time `location:"querystring" locationName:"response-expires" type:"timestamp" timestampFormat:"iso8601"`

	// Specifies the algorithm to use when encrypting the object (e.g., AES256).
	SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`

	// Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
	// data. This value is used to store the object and then it is discarded; Amazon
	// does not store the encryption key. The key must be appropriate for use with
	// the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
	// header.
	SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"`

	// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
	// Amazon S3 uses this header for a message integrity check to ensure the encryption
	// key was transmitted without error.
	SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`

	// VersionId used to reference a specific version of the object.
	VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
}

// String returns the string representation
func (s GetObjectInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s GetObjectInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *GetObjectInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "GetObjectInput"}
	if s.Bucket == nil {
		invalidParams.Add(request.NewErrParamRequired("Bucket"))
	}
	if s.Key == nil {
		invalidParams.Add(request.NewErrParamRequired("Key"))
	}
	if s.Key != nil && len(*s.Key) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("Key", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetBucket sets the Bucket field's value.
func (s *GetObjectInput) SetBucket(v string) *GetObjectInput {
	s.Bucket = &v
	return s
}

// SetIfMatch sets the IfMatch field's value.
func (s *GetObjectInput) SetIfMatch(v string) *GetObjectInput {
	s.IfMatch = &v
	return s
}

// SetIfModifiedSince sets the IfModifiedSince field's value.
func (s *GetObjectInput) SetIfModifiedSince(v time.Time) *GetObjectInput {
	s.IfModifiedSince = &v
	return s
}

// SetIfNoneMatch sets the IfNoneMatch field's value.
func (s *GetObjectInput) SetIfNoneMatch(v string) *GetObjectInput {
	s.IfNoneMatch = &v
	return s
}

// SetIfUnmodifiedSince sets the IfUnmodifiedSince field's value.
func (s *GetObjectInput) SetIfUnmodifiedSince(v time.Time) *GetObjectInput {
	s.IfUnmodifiedSince = &v
	return s
}

// SetKey sets the Key field's value.
func (s *GetObjectInput) SetKey(v string) *GetObjectInput {
	s.Key = &v
	return s
}

// SetPartNumber sets the PartNumber field's value.
func (s *GetObjectInput) SetPartNumber(v int64) *GetObjectInput {
	s.PartNumber = &v
	return s
}

// SetRange sets the Range field's value.
func (s *GetObjectInput) SetRange(v string) *GetObjectInput {
	s.Range = &v
	return s
}

// SetRequestPayer sets the RequestPayer field's value.
func (s *GetObjectInput) SetRequestPayer(v string) *GetObjectInput {
	s.RequestPayer = &v
	return s
}

// SetResponseCacheControl sets the ResponseCacheControl field's value.
func (s *GetObjectInput) SetResponseCacheControl(v string) *GetObjectInput {
	s.ResponseCacheControl = &v
	return s
}

// SetResponseContentDisposition sets the ResponseContentDisposition field's value.
func (s *GetObjectInput) SetResponseContentDisposition(v string) *GetObjectInput {
	s.ResponseContentDisposition = &v
	return s
}

// SetResponseContentEncoding sets the ResponseContentEncoding field's value.
func (s *GetObjectInput) SetResponseContentEncoding(v string) *GetObjectInput {
	s.ResponseContentEncoding = &v
	return s
}

// SetResponseContentLanguage sets the ResponseContentLanguage field's value.
func (s *GetObjectInput) SetResponseContentLanguage(v string) *GetObjectInput {
	s.ResponseContentLanguage = &v
	return s
}

// SetResponseContentType sets the ResponseContentType field's value.
func (s *GetObjectInput) SetResponseContentType(v string) *GetObjectInput {
	s.ResponseContentType = &v
	return s
}

// SetResponseExpires sets the ResponseExpires field's value.
func (s *GetObjectInput) SetResponseExpires(v time.Time) *GetObjectInput {
	s.ResponseExpires = &v
	return s
}

// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value.
func (s *GetObjectInput) SetSSECustomerAlgorithm(v string) *GetObjectInput {
	s.SSECustomerAlgorithm = &v
	return s
}

// SetSSECustomerKey sets the SSECustomerKey field's value.
func (s *GetObjectInput) SetSSECustomerKey(v string) *GetObjectInput {
	s.SSECustomerKey = &v
	return s
}

// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
func (s *GetObjectInput) SetSSECustomerKeyMD5(v string) *GetObjectInput {
	s.SSECustomerKeyMD5 = &v
	return s
}

// SetVersionId sets the VersionId field's value.
func (s *GetObjectInput) SetVersionId(v string) *GetObjectInput {
	s.VersionId = &v
	return s
}

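// Usage sketch (illustrative, not part of the generated API): the Set* methods
// above return the receiver, so a request can be built fluently; here a ranged
// read of the first kilobyte, assuming a client named svc.
//
//	in := (&s3.GetObjectInput{}).
//		SetBucket("my-bucket").
//		SetKey("my-key").
//		SetRange("bytes=0-1023")
//	if err := in.Validate(); err != nil {
//		log.Fatal(err) // e.g. a missing Bucket or Key
//	}
//	out, err := svc.GetObject(in)
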
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectOutput
type GetObjectOutput struct {
	_ struct{} `type:"structure" payload:"Body"`

	AcceptRanges *string `location:"header" locationName:"accept-ranges" type:"string"`

	// Object data.
	Body io.ReadCloser `type:"blob"`

	// Specifies caching behavior along the request/reply chain.
	CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"`

	// Specifies presentational information for the object.
	ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"`

	// Specifies what content encodings have been applied to the object and thus
	// what decoding mechanisms must be applied to obtain the media-type referenced
	// by the Content-Type header field.
	ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"`

	// The language the content is in.
	ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"`

	// Size of the body in bytes.
	ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"`

	// The portion of the object returned in the response.
	ContentRange *string `location:"header" locationName:"Content-Range" type:"string"`

	// A standard MIME type describing the format of the object data.
	ContentType *string `location:"header" locationName:"Content-Type" type:"string"`

	// Specifies whether the object retrieved was (true) or was not (false) a Delete
	// Marker. If false, this response header does not appear in the response.
	DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"`

	// An ETag is an opaque identifier assigned by a web server to a specific version
	// of a resource found at a URL.
	ETag *string `location:"header" locationName:"ETag" type:"string"`

	// If the object expiration is configured (see PUT Bucket lifecycle), the response
	// includes this header. It includes the expiry-date and rule-id key value pairs
	// providing object expiration information. The value of the rule-id is URL
	// encoded.
	Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"`

	// The date and time at which the object is no longer cacheable.
	Expires *string `location:"header" locationName:"Expires" type:"string"`

	// Last modified date of the object.
	LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp" timestampFormat:"rfc822"`

	// A map of metadata to store with the object in S3.
	Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`

	// This is set to the number of metadata entries not returned in x-amz-meta
	// headers. This can happen if you create metadata using an API like SOAP that
	// supports more flexible metadata than the REST API. For example, using SOAP,
	// you can create metadata whose values are not legal HTTP headers.
	MissingMeta *int64 `location:"header" locationName:"x-amz-missing-meta" type:"integer"`

	// The count of parts this object has.
	PartsCount *int64 `location:"header" locationName:"x-amz-mp-parts-count" type:"integer"`

	ReplicationStatus *string `location:"header" locationName:"x-amz-replication-status" type:"string" enum:"ReplicationStatus"`

	// If present, indicates that the requester was successfully charged for the
	// request.
	RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`

	// Provides information about the object restoration operation and the expiration
	// time of the restored object copy.
	Restore *string `location:"header" locationName:"x-amz-restore" type:"string"`

	// If server-side encryption with a customer-provided encryption key was requested,
	// the response will include this header confirming the encryption algorithm
	// used.
	SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`

	// If server-side encryption with a customer-provided encryption key was requested,
	// the response will include this header to provide round trip message integrity
	// verification of the customer-provided encryption key.
	SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`

	// If present, specifies the ID of the AWS Key Management Service (KMS) master
	// encryption key that was used for the object.
	SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`

	// The server-side encryption algorithm used when storing this object in S3
	// (e.g., AES256, aws:kms).
	ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`

	StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"`

	// The number of tags, if any, on the object.
	TagCount *int64 `location:"header" locationName:"x-amz-tagging-count" type:"integer"`

	// Version of the object.
	VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"`

	// If the bucket is configured as a website, redirects requests for this object
	// to another object in the same bucket or to an external URL. Amazon S3 stores
	// the value of this header in the object metadata.
	WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"`
}

// String returns the string representation
func (s GetObjectOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s GetObjectOutput) GoString() string {
	return s.String()
}

// SetAcceptRanges sets the AcceptRanges field's value.
func (s *GetObjectOutput) SetAcceptRanges(v string) *GetObjectOutput {
	s.AcceptRanges = &v
	return s
}

// SetBody sets the Body field's value.
func (s *GetObjectOutput) SetBody(v io.ReadCloser) *GetObjectOutput {
	s.Body = v
	return s
}

// SetCacheControl sets the CacheControl field's value.
func (s *GetObjectOutput) SetCacheControl(v string) *GetObjectOutput {
	s.CacheControl = &v
	return s
}

// SetContentDisposition sets the ContentDisposition field's value.
func (s *GetObjectOutput) SetContentDisposition(v string) *GetObjectOutput {
	s.ContentDisposition = &v
	return s
}

// SetContentEncoding sets the ContentEncoding field's value.
func (s *GetObjectOutput) SetContentEncoding(v string) *GetObjectOutput {
	s.ContentEncoding = &v
	return s
}

// SetContentLanguage sets the ContentLanguage field's value.
func (s *GetObjectOutput) SetContentLanguage(v string) *GetObjectOutput {
	s.ContentLanguage = &v
	return s
}

// SetContentLength sets the ContentLength field's value.
func (s *GetObjectOutput) SetContentLength(v int64) *GetObjectOutput {
	s.ContentLength = &v
	return s
}

// SetContentRange sets the ContentRange field's value.
func (s *GetObjectOutput) SetContentRange(v string) *GetObjectOutput {
	s.ContentRange = &v
	return s
}

// SetContentType sets the ContentType field's value.
func (s *GetObjectOutput) SetContentType(v string) *GetObjectOutput {
	s.ContentType = &v
	return s
}

// SetDeleteMarker sets the DeleteMarker field's value.
func (s *GetObjectOutput) SetDeleteMarker(v bool) *GetObjectOutput {
	s.DeleteMarker = &v
	return s
}

// SetETag sets the ETag field's value.
func (s *GetObjectOutput) SetETag(v string) *GetObjectOutput {
	s.ETag = &v
	return s
}

// SetExpiration sets the Expiration field's value.
func (s *GetObjectOutput) SetExpiration(v string) *GetObjectOutput {
	s.Expiration = &v
	return s
}

// SetExpires sets the Expires field's value.
func (s *GetObjectOutput) SetExpires(v string) *GetObjectOutput {
	s.Expires = &v
	return s
}

// SetLastModified sets the LastModified field's value.
func (s *GetObjectOutput) SetLastModified(v time.Time) *GetObjectOutput {
	s.LastModified = &v
	return s
}

// SetMetadata sets the Metadata field's value.
func (s *GetObjectOutput) SetMetadata(v map[string]*string) *GetObjectOutput {
	s.Metadata = v
	return s
}

// SetMissingMeta sets the MissingMeta field's value.
func (s *GetObjectOutput) SetMissingMeta(v int64) *GetObjectOutput {
	s.MissingMeta = &v
	return s
}

// SetPartsCount sets the PartsCount field's value.
func (s *GetObjectOutput) SetPartsCount(v int64) *GetObjectOutput {
	s.PartsCount = &v
	return s
}

// SetReplicationStatus sets the ReplicationStatus field's value.
func (s *GetObjectOutput) SetReplicationStatus(v string) *GetObjectOutput {
	s.ReplicationStatus = &v
	return s
}

// SetRequestCharged sets the RequestCharged field's value.
func (s *GetObjectOutput) SetRequestCharged(v string) *GetObjectOutput {
	s.RequestCharged = &v
	return s
}

// SetRestore sets the Restore field's value.
func (s *GetObjectOutput) SetRestore(v string) *GetObjectOutput {
	s.Restore = &v
	return s
}

// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value.
func (s *GetObjectOutput) SetSSECustomerAlgorithm(v string) *GetObjectOutput {
	s.SSECustomerAlgorithm = &v
	return s
}

// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
func (s *GetObjectOutput) SetSSECustomerKeyMD5(v string) *GetObjectOutput {
	s.SSECustomerKeyMD5 = &v
	return s
}

// SetSSEKMSKeyId sets the SSEKMSKeyId field's value.
func (s *GetObjectOutput) SetSSEKMSKeyId(v string) *GetObjectOutput {
	s.SSEKMSKeyId = &v
	return s
}

// SetServerSideEncryption sets the ServerSideEncryption field's value.
func (s *GetObjectOutput) SetServerSideEncryption(v string) *GetObjectOutput {
	s.ServerSideEncryption = &v
	return s
}

// SetStorageClass sets the StorageClass field's value.
func (s *GetObjectOutput) SetStorageClass(v string) *GetObjectOutput {
	s.StorageClass = &v
	return s
}

// SetTagCount sets the TagCount field's value.
func (s *GetObjectOutput) SetTagCount(v int64) *GetObjectOutput {
	s.TagCount = &v
	return s
}

// SetVersionId sets the VersionId field's value.
func (s *GetObjectOutput) SetVersionId(v string) *GetObjectOutput {
	s.VersionId = &v
	return s
}

// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value.
func (s *GetObjectOutput) SetWebsiteRedirectLocation(v string) *GetObjectOutput {
	s.WebsiteRedirectLocation = &v
	return s
}

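// Usage sketch (illustrative, not part of the generated API): Body is an
// io.ReadCloser streaming the object data; drain and close it to release the
// underlying connection. Assumes a client named svc.
//
//	out, err := svc.GetObject(&s3.GetObjectInput{
//		Bucket: aws.String("my-bucket"),
//		Key:    aws.String("my-key"),
//	})
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer out.Body.Close()
//	n, err := io.Copy(os.Stdout, out.Body)
//	fmt.Fprintln(os.Stderr, n, "bytes of", aws.StringValue(out.ContentType))
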
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTaggingRequest
type GetObjectTaggingInput struct {
	_ struct{} `type:"structure"`

	// Bucket is a required field
	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`

	// Key is a required field
	Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`

	VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
}

// String returns the string representation
func (s GetObjectTaggingInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s GetObjectTaggingInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *GetObjectTaggingInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "GetObjectTaggingInput"}
	if s.Bucket == nil {
		invalidParams.Add(request.NewErrParamRequired("Bucket"))
	}
	if s.Key == nil {
		invalidParams.Add(request.NewErrParamRequired("Key"))
	}
	if s.Key != nil && len(*s.Key) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("Key", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetBucket sets the Bucket field's value.
func (s *GetObjectTaggingInput) SetBucket(v string) *GetObjectTaggingInput {
	s.Bucket = &v
	return s
}

// SetKey sets the Key field's value.
func (s *GetObjectTaggingInput) SetKey(v string) *GetObjectTaggingInput {
	s.Key = &v
	return s
}

// SetVersionId sets the VersionId field's value.
func (s *GetObjectTaggingInput) SetVersionId(v string) *GetObjectTaggingInput {
	s.VersionId = &v
	return s
}

// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTaggingOutput
type GetObjectTaggingOutput struct {
	_ struct{} `type:"structure"`

	// TagSet is a required field
	TagSet []*Tag `locationNameList:"Tag" type:"list" required:"true"`

	VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"`
}

// String returns the string representation
func (s GetObjectTaggingOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s GetObjectTaggingOutput) GoString() string {
	return s.String()
}

// SetTagSet sets the TagSet field's value.
func (s *GetObjectTaggingOutput) SetTagSet(v []*Tag) *GetObjectTaggingOutput {
	s.TagSet = v
	return s
}

// SetVersionId sets the VersionId field's value.
func (s *GetObjectTaggingOutput) SetVersionId(v string) *GetObjectTaggingOutput {
	s.VersionId = &v
	return s
}

11233// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTorrentRequest
11234type GetObjectTorrentInput struct {
11235 _ struct{} `type:"structure"`
11236
11237 // Bucket is a required field
11238 Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
11239
11240 // Key is a required field
11241 Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
11242
11243 // Confirms that the requester knows that she or he will be charged for the
11244 // request. Bucket owners need not specify this parameter in their requests.
11245 // Documentation on downloading objects from requester pays buckets can be found
11246 // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
11247 RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
11248}
11249
11250// String returns the string representation
11251func (s GetObjectTorrentInput) String() string {
11252 return awsutil.Prettify(s)
11253}
11254
11255// GoString returns the string representation
11256func (s GetObjectTorrentInput) GoString() string {
11257 return s.String()
11258}
11259
11260// Validate inspects the fields of the type to determine if they are valid.
11261func (s *GetObjectTorrentInput) Validate() error {
11262 invalidParams := request.ErrInvalidParams{Context: "GetObjectTorrentInput"}
11263 if s.Bucket == nil {
11264 invalidParams.Add(request.NewErrParamRequired("Bucket"))
11265 }
11266 if s.Key == nil {
11267 invalidParams.Add(request.NewErrParamRequired("Key"))
11268 }
11269 if s.Key != nil && len(*s.Key) < 1 {
11270 invalidParams.Add(request.NewErrParamMinLen("Key", 1))
11271 }
11272
11273 if invalidParams.Len() > 0 {
11274 return invalidParams
11275 }
11276 return nil
11277}
11278
11279// SetBucket sets the Bucket field's value.
11280func (s *GetObjectTorrentInput) SetBucket(v string) *GetObjectTorrentInput {
11281 s.Bucket = &v
11282 return s
11283}
11284
11285// SetKey sets the Key field's value.
11286func (s *GetObjectTorrentInput) SetKey(v string) *GetObjectTorrentInput {
11287 s.Key = &v
11288 return s
11289}
11290
11291// SetRequestPayer sets the RequestPayer field's value.
11292func (s *GetObjectTorrentInput) SetRequestPayer(v string) *GetObjectTorrentInput {
11293 s.RequestPayer = &v
11294 return s
11295}
11296
11297// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTorrentOutput
11298type GetObjectTorrentOutput struct {
11299 _ struct{} `type:"structure" payload:"Body"`
11300
11301 Body io.ReadCloser `type:"blob"`
11302
11303 // If present, indicates that the requester was successfully charged for the
11304 // request.
11305 RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
11306}
11307
11308// String returns the string representation
11309func (s GetObjectTorrentOutput) String() string {
11310 return awsutil.Prettify(s)
11311}
11312
11313// GoString returns the string representation
11314func (s GetObjectTorrentOutput) GoString() string {
11315 return s.String()
11316}
11317
11318// SetBody sets the Body field's value.
11319func (s *GetObjectTorrentOutput) SetBody(v io.ReadCloser) *GetObjectTorrentOutput {
11320 s.Body = v
11321 return s
11322}
11323
11324// SetRequestCharged sets the RequestCharged field's value.
11325func (s *GetObjectTorrentOutput) SetRequestCharged(v string) *GetObjectTorrentOutput {
11326 s.RequestCharged = &v
11327 return s
11328}
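
// Example (illustrative sketch only): fetching a torrent file for an object
// and draining the Body stream, assuming an *s3.S3 client named svc and
// placeholder names. The Body is an io.ReadCloser and must be closed.
//
//	out, err := svc.GetObjectTorrent(new(s3.GetObjectTorrentInput).
//		SetBucket("my-bucket"). // hypothetical bucket
//		SetKey("my-key"))       // hypothetical key
//	if err != nil {
//		// handle the request error
//	}
//	defer out.Body.Close()
//	data, err := ioutil.ReadAll(out.Body)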

// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GlacierJobParameters
type GlacierJobParameters struct {
	_ struct{} `type:"structure"`

	// Glacier retrieval tier at which the restore will be processed.
	//
	// Tier is a required field
	Tier *string `type:"string" required:"true" enum:"Tier"`
}

// String returns the string representation
func (s GlacierJobParameters) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s GlacierJobParameters) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *GlacierJobParameters) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "GlacierJobParameters"}
	if s.Tier == nil {
		invalidParams.Add(request.NewErrParamRequired("Tier"))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetTier sets the Tier field's value.
func (s *GlacierJobParameters) SetTier(v string) *GlacierJobParameters {
	s.Tier = &v
	return s
}

// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Grant
type Grant struct {
	_ struct{} `type:"structure"`

	Grantee *Grantee `type:"structure"`

	// Specifies the permission given to the grantee.
	Permission *string `type:"string" enum:"Permission"`
}

// String returns the string representation
func (s Grant) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s Grant) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *Grant) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "Grant"}
	if s.Grantee != nil {
		if err := s.Grantee.Validate(); err != nil {
			invalidParams.AddNested("Grantee", err.(request.ErrInvalidParams))
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetGrantee sets the Grantee field's value.
func (s *Grant) SetGrantee(v *Grantee) *Grant {
	s.Grantee = v
	return s
}

// SetPermission sets the Permission field's value.
func (s *Grant) SetPermission(v string) *Grant {
	s.Permission = &v
	return s
}

// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Grantee
type Grantee struct {
	_ struct{} `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"`

	// Screen name of the grantee.
	DisplayName *string `type:"string"`

	// Email address of the grantee.
	EmailAddress *string `type:"string"`

	// The canonical user ID of the grantee.
	ID *string `type:"string"`

	// Type of grantee
	//
	// Type is a required field
	Type *string `locationName:"xsi:type" type:"string" xmlAttribute:"true" required:"true" enum:"Type"`

	// URI of the grantee group.
	URI *string `type:"string"`
}

// String returns the string representation
func (s Grantee) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s Grantee) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *Grantee) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "Grantee"}
	if s.Type == nil {
		invalidParams.Add(request.NewErrParamRequired("Type"))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetDisplayName sets the DisplayName field's value.
func (s *Grantee) SetDisplayName(v string) *Grantee {
	s.DisplayName = &v
	return s
}

// SetEmailAddress sets the EmailAddress field's value.
func (s *Grantee) SetEmailAddress(v string) *Grantee {
	s.EmailAddress = &v
	return s
}

// SetID sets the ID field's value.
func (s *Grantee) SetID(v string) *Grantee {
	s.ID = &v
	return s
}

// SetType sets the Type field's value.
func (s *Grantee) SetType(v string) *Grantee {
	s.Type = &v
	return s
}

// SetURI sets the URI field's value.
func (s *Grantee) SetURI(v string) *Grantee {
	s.URI = &v
	return s
}
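
// Example (illustrative sketch only): building a Grant that gives READ
// permission to a canonical user, using the fluent setters above. The ID is
// a placeholder; "CanonicalUser" and "READ" are values of the Type and
// Permission enums.
//
//	grant := new(s3.Grant).
//		SetGrantee(new(s3.Grantee).
//			SetType("CanonicalUser").
//			SetID("79a59df900b949e55d96a1e698fbacedfd6e09d98eacf8f8d5218e7cd47ef2be")). // hypothetical canonical user ID
//		SetPermission("READ")
//	if err := grant.Validate(); err != nil {
//		// a missing required field such as Grantee.Type is reported here
//	}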

// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucketRequest
type HeadBucketInput struct {
	_ struct{} `type:"structure"`

	// Bucket is a required field
	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
}

// String returns the string representation
func (s HeadBucketInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s HeadBucketInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *HeadBucketInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "HeadBucketInput"}
	if s.Bucket == nil {
		invalidParams.Add(request.NewErrParamRequired("Bucket"))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetBucket sets the Bucket field's value.
func (s *HeadBucketInput) SetBucket(v string) *HeadBucketInput {
	s.Bucket = &v
	return s
}
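
// Example (illustrative sketch only): HeadBucket is commonly used as an
// existence/access check, since it returns no body. Assumes an *s3.S3 client
// named svc and a placeholder bucket name.
//
//	_, err := svc.HeadBucket(new(s3.HeadBucketInput).SetBucket("my-bucket"))
//	if err != nil {
//		// inspect err (an awserr.Error) to distinguish a missing bucket
//		// from an access-denied response
//	}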

// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucketOutput
type HeadBucketOutput struct {
	_ struct{} `type:"structure"`
}

// String returns the string representation
func (s HeadBucketOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s HeadBucketOutput) GoString() string {
	return s.String()
}

// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObjectRequest
type HeadObjectInput struct {
	_ struct{} `type:"structure"`

	// Bucket is a required field
	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`

	// Return the object only if its entity tag (ETag) is the same as the one specified,
	// otherwise return a 412 (precondition failed).
	IfMatch *string `location:"header" locationName:"If-Match" type:"string"`

	// Return the object only if it has been modified since the specified time,
	// otherwise return a 304 (not modified).
	IfModifiedSince *time.Time `location:"header" locationName:"If-Modified-Since" type:"timestamp" timestampFormat:"rfc822"`

	// Return the object only if its entity tag (ETag) is different from the one
	// specified, otherwise return a 304 (not modified).
	IfNoneMatch *string `location:"header" locationName:"If-None-Match" type:"string"`

	// Return the object only if it has not been modified since the specified time,
	// otherwise return a 412 (precondition failed).
	IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp" timestampFormat:"rfc822"`

	// Key is a required field
	Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`

	// Part number of the object being read. This is a positive integer between
	// 1 and 10,000. Effectively performs a 'ranged' HEAD request for the part
	// specified. Useful for querying the size of the part and the number of
	// parts in this object.
	PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer"`

	// Downloads the specified range bytes of an object. For more information about
	// the HTTP Range header, go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35.
	Range *string `location:"header" locationName:"Range" type:"string"`

	// Confirms that the requester knows that she or he will be charged for the
	// request. Bucket owners need not specify this parameter in their requests.
	// Documentation on downloading objects from requester pays buckets can be found
	// at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
	RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`

	// Specifies the algorithm to use when encrypting the object (e.g., AES256).
	SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`

	// Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
	// data. This value is used to store the object and then it is discarded; Amazon
	// does not store the encryption key. The key must be appropriate for use with
	// the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
	// header.
	SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"`

	// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
	// Amazon S3 uses this header for a message integrity check to ensure the encryption
	// key was transmitted without error.
	SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`

	// VersionId used to reference a specific version of the object.
	VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
}

// String returns the string representation
func (s HeadObjectInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s HeadObjectInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *HeadObjectInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "HeadObjectInput"}
	if s.Bucket == nil {
		invalidParams.Add(request.NewErrParamRequired("Bucket"))
	}
	if s.Key == nil {
		invalidParams.Add(request.NewErrParamRequired("Key"))
	}
	if s.Key != nil && len(*s.Key) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("Key", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetBucket sets the Bucket field's value.
func (s *HeadObjectInput) SetBucket(v string) *HeadObjectInput {
	s.Bucket = &v
	return s
}

// SetIfMatch sets the IfMatch field's value.
func (s *HeadObjectInput) SetIfMatch(v string) *HeadObjectInput {
	s.IfMatch = &v
	return s
}

// SetIfModifiedSince sets the IfModifiedSince field's value.
func (s *HeadObjectInput) SetIfModifiedSince(v time.Time) *HeadObjectInput {
	s.IfModifiedSince = &v
	return s
}

// SetIfNoneMatch sets the IfNoneMatch field's value.
func (s *HeadObjectInput) SetIfNoneMatch(v string) *HeadObjectInput {
	s.IfNoneMatch = &v
	return s
}

// SetIfUnmodifiedSince sets the IfUnmodifiedSince field's value.
func (s *HeadObjectInput) SetIfUnmodifiedSince(v time.Time) *HeadObjectInput {
	s.IfUnmodifiedSince = &v
	return s
}

// SetKey sets the Key field's value.
func (s *HeadObjectInput) SetKey(v string) *HeadObjectInput {
	s.Key = &v
	return s
}

// SetPartNumber sets the PartNumber field's value.
func (s *HeadObjectInput) SetPartNumber(v int64) *HeadObjectInput {
	s.PartNumber = &v
	return s
}

// SetRange sets the Range field's value.
func (s *HeadObjectInput) SetRange(v string) *HeadObjectInput {
	s.Range = &v
	return s
}

// SetRequestPayer sets the RequestPayer field's value.
func (s *HeadObjectInput) SetRequestPayer(v string) *HeadObjectInput {
	s.RequestPayer = &v
	return s
}

// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value.
func (s *HeadObjectInput) SetSSECustomerAlgorithm(v string) *HeadObjectInput {
	s.SSECustomerAlgorithm = &v
	return s
}

// SetSSECustomerKey sets the SSECustomerKey field's value.
func (s *HeadObjectInput) SetSSECustomerKey(v string) *HeadObjectInput {
	s.SSECustomerKey = &v
	return s
}

// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
func (s *HeadObjectInput) SetSSECustomerKeyMD5(v string) *HeadObjectInput {
	s.SSECustomerKeyMD5 = &v
	return s
}

// SetVersionId sets the VersionId field's value.
func (s *HeadObjectInput) SetVersionId(v string) *HeadObjectInput {
	s.VersionId = &v
	return s
}
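
// Example (illustrative sketch only): a conditional HEAD request that asks
// for metadata only if the object changed since a given time, assuming an
// *s3.S3 client named svc and placeholder names. Note that a 304 Not
// Modified response surfaces as an error in this SDK.
//
//	out, err := svc.HeadObject(new(s3.HeadObjectInput).
//		SetBucket("my-bucket").
//		SetKey("my-key").
//		SetIfModifiedSince(time.Date(2017, 1, 1, 0, 0, 0, 0, time.UTC)))
//	if err == nil {
//		fmt.Println(aws.Int64Value(out.ContentLength), aws.StringValue(out.ContentType))
//	}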

// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObjectOutput
type HeadObjectOutput struct {
	_ struct{} `type:"structure"`

	AcceptRanges *string `location:"header" locationName:"accept-ranges" type:"string"`

	// Specifies caching behavior along the request/reply chain.
	CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"`

	// Specifies presentational information for the object.
	ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"`

	// Specifies what content encodings have been applied to the object and thus
	// what decoding mechanisms must be applied to obtain the media-type referenced
	// by the Content-Type header field.
	ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"`

	// The language the content is in.
	ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"`

	// Size of the body in bytes.
	ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"`

	// A standard MIME type describing the format of the object data.
	ContentType *string `location:"header" locationName:"Content-Type" type:"string"`

	// Specifies whether the object retrieved was (true) or was not (false) a Delete
	// Marker. If false, this response header does not appear in the response.
	DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"`

	// An ETag is an opaque identifier assigned by a web server to a specific
	// version of a resource found at a URL.
	ETag *string `location:"header" locationName:"ETag" type:"string"`

	// If the object expiration is configured (see PUT Bucket lifecycle), the response
	// includes this header. It includes the expiry-date and rule-id key value pairs
	// providing object expiration information. The value of the rule-id is URL
	// encoded.
	Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"`

	// The date and time at which the object is no longer cacheable.
	Expires *string `location:"header" locationName:"Expires" type:"string"`

	// Last modified date of the object
	LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp" timestampFormat:"rfc822"`

	// A map of metadata to store with the object in S3.
	Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`

	// This is set to the number of metadata entries not returned in x-amz-meta
	// headers. This can happen if you create metadata using an API like SOAP that
	// supports more flexible metadata than the REST API. For example, using SOAP,
	// you can create metadata whose values are not legal HTTP headers.
	MissingMeta *int64 `location:"header" locationName:"x-amz-missing-meta" type:"integer"`

	// The count of parts this object has.
	PartsCount *int64 `location:"header" locationName:"x-amz-mp-parts-count" type:"integer"`

	ReplicationStatus *string `location:"header" locationName:"x-amz-replication-status" type:"string" enum:"ReplicationStatus"`

	// If present, indicates that the requester was successfully charged for the
	// request.
	RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`

	// Provides information about object restoration operation and expiration time
	// of the restored object copy.
	Restore *string `location:"header" locationName:"x-amz-restore" type:"string"`

	// If server-side encryption with a customer-provided encryption key was requested,
	// the response will include this header confirming the encryption algorithm
	// used.
	SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`

	// If server-side encryption with a customer-provided encryption key was requested,
	// the response will include this header to provide round trip message integrity
	// verification of the customer-provided encryption key.
	SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`

	// If present, specifies the ID of the AWS Key Management Service (KMS) master
	// encryption key that was used for the object.
	SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`

	// The Server-side encryption algorithm used when storing this object in S3
	// (e.g., AES256, aws:kms).
	ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`

	StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"`

	// Version of the object.
	VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"`

	// If the bucket is configured as a website, redirects requests for this object
	// to another object in the same bucket or to an external URL. Amazon S3 stores
	// the value of this header in the object metadata.
	WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"`
}

// String returns the string representation
func (s HeadObjectOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s HeadObjectOutput) GoString() string {
	return s.String()
}

// SetAcceptRanges sets the AcceptRanges field's value.
func (s *HeadObjectOutput) SetAcceptRanges(v string) *HeadObjectOutput {
	s.AcceptRanges = &v
	return s
}

// SetCacheControl sets the CacheControl field's value.
func (s *HeadObjectOutput) SetCacheControl(v string) *HeadObjectOutput {
	s.CacheControl = &v
	return s
}

// SetContentDisposition sets the ContentDisposition field's value.
func (s *HeadObjectOutput) SetContentDisposition(v string) *HeadObjectOutput {
	s.ContentDisposition = &v
	return s
}

// SetContentEncoding sets the ContentEncoding field's value.
func (s *HeadObjectOutput) SetContentEncoding(v string) *HeadObjectOutput {
	s.ContentEncoding = &v
	return s
}

// SetContentLanguage sets the ContentLanguage field's value.
func (s *HeadObjectOutput) SetContentLanguage(v string) *HeadObjectOutput {
	s.ContentLanguage = &v
	return s
}

// SetContentLength sets the ContentLength field's value.
func (s *HeadObjectOutput) SetContentLength(v int64) *HeadObjectOutput {
	s.ContentLength = &v
	return s
}

// SetContentType sets the ContentType field's value.
func (s *HeadObjectOutput) SetContentType(v string) *HeadObjectOutput {
	s.ContentType = &v
	return s
}

// SetDeleteMarker sets the DeleteMarker field's value.
func (s *HeadObjectOutput) SetDeleteMarker(v bool) *HeadObjectOutput {
	s.DeleteMarker = &v
	return s
}

// SetETag sets the ETag field's value.
func (s *HeadObjectOutput) SetETag(v string) *HeadObjectOutput {
	s.ETag = &v
	return s
}

// SetExpiration sets the Expiration field's value.
func (s *HeadObjectOutput) SetExpiration(v string) *HeadObjectOutput {
	s.Expiration = &v
	return s
}

// SetExpires sets the Expires field's value.
func (s *HeadObjectOutput) SetExpires(v string) *HeadObjectOutput {
	s.Expires = &v
	return s
}

// SetLastModified sets the LastModified field's value.
func (s *HeadObjectOutput) SetLastModified(v time.Time) *HeadObjectOutput {
	s.LastModified = &v
	return s
}

// SetMetadata sets the Metadata field's value.
func (s *HeadObjectOutput) SetMetadata(v map[string]*string) *HeadObjectOutput {
	s.Metadata = v
	return s
}

// SetMissingMeta sets the MissingMeta field's value.
func (s *HeadObjectOutput) SetMissingMeta(v int64) *HeadObjectOutput {
	s.MissingMeta = &v
	return s
}

// SetPartsCount sets the PartsCount field's value.
func (s *HeadObjectOutput) SetPartsCount(v int64) *HeadObjectOutput {
	s.PartsCount = &v
	return s
}

// SetReplicationStatus sets the ReplicationStatus field's value.
func (s *HeadObjectOutput) SetReplicationStatus(v string) *HeadObjectOutput {
	s.ReplicationStatus = &v
	return s
}

// SetRequestCharged sets the RequestCharged field's value.
func (s *HeadObjectOutput) SetRequestCharged(v string) *HeadObjectOutput {
	s.RequestCharged = &v
	return s
}

// SetRestore sets the Restore field's value.
func (s *HeadObjectOutput) SetRestore(v string) *HeadObjectOutput {
	s.Restore = &v
	return s
}

// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value.
func (s *HeadObjectOutput) SetSSECustomerAlgorithm(v string) *HeadObjectOutput {
	s.SSECustomerAlgorithm = &v
	return s
}

// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
func (s *HeadObjectOutput) SetSSECustomerKeyMD5(v string) *HeadObjectOutput {
	s.SSECustomerKeyMD5 = &v
	return s
}

// SetSSEKMSKeyId sets the SSEKMSKeyId field's value.
func (s *HeadObjectOutput) SetSSEKMSKeyId(v string) *HeadObjectOutput {
	s.SSEKMSKeyId = &v
	return s
}

// SetServerSideEncryption sets the ServerSideEncryption field's value.
func (s *HeadObjectOutput) SetServerSideEncryption(v string) *HeadObjectOutput {
	s.ServerSideEncryption = &v
	return s
}

// SetStorageClass sets the StorageClass field's value.
func (s *HeadObjectOutput) SetStorageClass(v string) *HeadObjectOutput {
	s.StorageClass = &v
	return s
}

// SetVersionId sets the VersionId field's value.
func (s *HeadObjectOutput) SetVersionId(v string) *HeadObjectOutput {
	s.VersionId = &v
	return s
}

// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value.
func (s *HeadObjectOutput) SetWebsiteRedirectLocation(v string) *HeadObjectOutput {
	s.WebsiteRedirectLocation = &v
	return s
}

// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/IndexDocument
type IndexDocument struct {
	_ struct{} `type:"structure"`

	// A suffix that is appended to a request that is for a directory on the website
	// endpoint (e.g. if the suffix is index.html and you make a request to
	// samplebucket/images/, the data that is returned will be for the object with
	// the key name images/index.html). The suffix must not be empty and must not
	// include a slash character.
	//
	// Suffix is a required field
	Suffix *string `type:"string" required:"true"`
}

// String returns the string representation
func (s IndexDocument) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s IndexDocument) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *IndexDocument) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "IndexDocument"}
	if s.Suffix == nil {
		invalidParams.Add(request.NewErrParamRequired("Suffix"))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetSuffix sets the Suffix field's value.
func (s *IndexDocument) SetSuffix(v string) *IndexDocument {
	s.Suffix = &v
	return s
}

// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Initiator
type Initiator struct {
	_ struct{} `type:"structure"`

	// Name of the Principal.
	DisplayName *string `type:"string"`

	// If the principal is an AWS account, it provides the Canonical User ID. If
	// the principal is an IAM User, it provides a user ARN value.
	ID *string `type:"string"`
}

// String returns the string representation
func (s Initiator) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s Initiator) GoString() string {
	return s.String()
}

// SetDisplayName sets the DisplayName field's value.
func (s *Initiator) SetDisplayName(v string) *Initiator {
	s.DisplayName = &v
	return s
}

// SetID sets the ID field's value.
func (s *Initiator) SetID(v string) *Initiator {
	s.ID = &v
	return s
}

// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/InventoryConfiguration
type InventoryConfiguration struct {
	_ struct{} `type:"structure"`

	// Contains information about where to publish the inventory results.
	//
	// Destination is a required field
	Destination *InventoryDestination `type:"structure" required:"true"`

	// Specifies an inventory filter. The inventory only includes objects that meet
	// the filter's criteria.
	Filter *InventoryFilter `type:"structure"`

	// The ID used to identify the inventory configuration.
	//
	// Id is a required field
	Id *string `type:"string" required:"true"`

	// Specifies which object version(s) to include in the inventory results.
	//
	// IncludedObjectVersions is a required field
	IncludedObjectVersions *string `type:"string" required:"true" enum:"InventoryIncludedObjectVersions"`

	// Specifies whether the inventory is enabled or disabled.
	//
	// IsEnabled is a required field
	IsEnabled *bool `type:"boolean" required:"true"`

	// Contains the optional fields that are included in the inventory results.
	OptionalFields []*string `locationNameList:"Field" type:"list"`

	// Specifies the schedule for generating inventory results.
	//
	// Schedule is a required field
	Schedule *InventorySchedule `type:"structure" required:"true"`
}

// String returns the string representation
func (s InventoryConfiguration) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s InventoryConfiguration) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *InventoryConfiguration) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "InventoryConfiguration"}
	if s.Destination == nil {
		invalidParams.Add(request.NewErrParamRequired("Destination"))
	}
	if s.Id == nil {
		invalidParams.Add(request.NewErrParamRequired("Id"))
	}
	if s.IncludedObjectVersions == nil {
		invalidParams.Add(request.NewErrParamRequired("IncludedObjectVersions"))
	}
	if s.IsEnabled == nil {
		invalidParams.Add(request.NewErrParamRequired("IsEnabled"))
	}
	if s.Schedule == nil {
		invalidParams.Add(request.NewErrParamRequired("Schedule"))
	}
	if s.Destination != nil {
		if err := s.Destination.Validate(); err != nil {
			invalidParams.AddNested("Destination", err.(request.ErrInvalidParams))
		}
	}
	if s.Filter != nil {
		if err := s.Filter.Validate(); err != nil {
			invalidParams.AddNested("Filter", err.(request.ErrInvalidParams))
		}
	}
	if s.Schedule != nil {
		if err := s.Schedule.Validate(); err != nil {
			invalidParams.AddNested("Schedule", err.(request.ErrInvalidParams))
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetDestination sets the Destination field's value.
func (s *InventoryConfiguration) SetDestination(v *InventoryDestination) *InventoryConfiguration {
	s.Destination = v
	return s
}

// SetFilter sets the Filter field's value.
func (s *InventoryConfiguration) SetFilter(v *InventoryFilter) *InventoryConfiguration {
	s.Filter = v
	return s
}

// SetId sets the Id field's value.
func (s *InventoryConfiguration) SetId(v string) *InventoryConfiguration {
	s.Id = &v
	return s
}

// SetIncludedObjectVersions sets the IncludedObjectVersions field's value.
func (s *InventoryConfiguration) SetIncludedObjectVersions(v string) *InventoryConfiguration {
	s.IncludedObjectVersions = &v
	return s
}

// SetIsEnabled sets the IsEnabled field's value.
func (s *InventoryConfiguration) SetIsEnabled(v bool) *InventoryConfiguration {
	s.IsEnabled = &v
	return s
}

// SetOptionalFields sets the OptionalFields field's value.
func (s *InventoryConfiguration) SetOptionalFields(v []*string) *InventoryConfiguration {
	s.OptionalFields = v
	return s
}

// SetSchedule sets the Schedule field's value.
func (s *InventoryConfiguration) SetSchedule(v *InventorySchedule) *InventoryConfiguration {
	s.Schedule = v
	return s
}
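
// Example (illustrative sketch only): assembling a weekly CSV inventory
// configuration with the fluent setters above. The bucket ARN, inventory ID,
// and prefix are placeholders; "All", "Weekly", and "CSV" are values of the
// corresponding inventory enums.
//
//	cfg := new(s3.InventoryConfiguration).
//		SetId("weekly-inventory").
//		SetIsEnabled(true).
//		SetIncludedObjectVersions("All").
//		SetSchedule(new(s3.InventorySchedule).SetFrequency("Weekly")).
//		SetDestination(new(s3.InventoryDestination).
//			SetS3BucketDestination(new(s3.InventoryS3BucketDestination).
//				SetBucket("arn:aws:s3:::my-inventory-bucket"). // hypothetical ARN
//				SetFormat("CSV").
//				SetPrefix("inventory/")))
//	if err := cfg.Validate(); err != nil {
//		// missing required fields are reported here
//	}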

// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/InventoryDestination
type InventoryDestination struct {
	_ struct{} `type:"structure"`

	// Contains the bucket name, file format, bucket owner (optional), and prefix
	// (optional) where inventory results are published.
	//
	// S3BucketDestination is a required field
	S3BucketDestination *InventoryS3BucketDestination `type:"structure" required:"true"`
}

// String returns the string representation
func (s InventoryDestination) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s InventoryDestination) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *InventoryDestination) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "InventoryDestination"}
	if s.S3BucketDestination == nil {
		invalidParams.Add(request.NewErrParamRequired("S3BucketDestination"))
	}
	if s.S3BucketDestination != nil {
		if err := s.S3BucketDestination.Validate(); err != nil {
			invalidParams.AddNested("S3BucketDestination", err.(request.ErrInvalidParams))
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetS3BucketDestination sets the S3BucketDestination field's value.
func (s *InventoryDestination) SetS3BucketDestination(v *InventoryS3BucketDestination) *InventoryDestination {
	s.S3BucketDestination = v
	return s
}

// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/InventoryFilter
type InventoryFilter struct {
	_ struct{} `type:"structure"`

	// The prefix that an object must have to be included in the inventory results.
	//
	// Prefix is a required field
	Prefix *string `type:"string" required:"true"`
}

// String returns the string representation
func (s InventoryFilter) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s InventoryFilter) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *InventoryFilter) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "InventoryFilter"}
	if s.Prefix == nil {
		invalidParams.Add(request.NewErrParamRequired("Prefix"))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetPrefix sets the Prefix field's value.
func (s *InventoryFilter) SetPrefix(v string) *InventoryFilter {
	s.Prefix = &v
	return s
}

// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/InventoryS3BucketDestination
type InventoryS3BucketDestination struct {
	_ struct{} `type:"structure"`

	// The ID of the account that owns the destination bucket.
	AccountId *string `type:"string"`

	// The Amazon resource name (ARN) of the bucket where inventory results will
	// be published.
	//
	// Bucket is a required field
	Bucket *string `type:"string" required:"true"`

	// Specifies the output format of the inventory results.
	//
	// Format is a required field
	Format *string `type:"string" required:"true" enum:"InventoryFormat"`

	// The prefix that is prepended to all inventory results.
	Prefix *string `type:"string"`
}

// String returns the string representation
func (s InventoryS3BucketDestination) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s InventoryS3BucketDestination) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *InventoryS3BucketDestination) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "InventoryS3BucketDestination"}
	if s.Bucket == nil {
		invalidParams.Add(request.NewErrParamRequired("Bucket"))
	}
	if s.Format == nil {
		invalidParams.Add(request.NewErrParamRequired("Format"))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetAccountId sets the AccountId field's value.
func (s *InventoryS3BucketDestination) SetAccountId(v string) *InventoryS3BucketDestination {
	s.AccountId = &v
	return s
}

// SetBucket sets the Bucket field's value.
func (s *InventoryS3BucketDestination) SetBucket(v string) *InventoryS3BucketDestination {
	s.Bucket = &v
	return s
}

// SetFormat sets the Format field's value.
func (s *InventoryS3BucketDestination) SetFormat(v string) *InventoryS3BucketDestination {
	s.Format = &v
	return s
}

// SetPrefix sets the Prefix field's value.
func (s *InventoryS3BucketDestination) SetPrefix(v string) *InventoryS3BucketDestination {
	s.Prefix = &v
	return s
}

// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/InventorySchedule
type InventorySchedule struct {
	_ struct{} `type:"structure"`

	// Specifies how frequently inventory results are produced.
	//
	// Frequency is a required field
	Frequency *string `type:"string" required:"true" enum:"InventoryFrequency"`
}

// String returns the string representation
func (s InventorySchedule) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s InventorySchedule) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *InventorySchedule) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "InventorySchedule"}
	if s.Frequency == nil {
		invalidParams.Add(request.NewErrParamRequired("Frequency"))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetFrequency sets the Frequency field's value.
func (s *InventorySchedule) SetFrequency(v string) *InventorySchedule {
	s.Frequency = &v
	return s
}

// Container for object key name prefix and suffix filtering rules.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/S3KeyFilter
type KeyFilter struct {
	_ struct{} `type:"structure"`

	// A list of containers for key-value pairs that define the criteria for the
	// filter rule.
	FilterRules []*FilterRule `locationName:"FilterRule" type:"list" flattened:"true"`
}

// String returns the string representation
func (s KeyFilter) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s KeyFilter) GoString() string {
	return s.String()
}

// SetFilterRules sets the FilterRules field's value.
func (s *KeyFilter) SetFilterRules(v []*FilterRule) *KeyFilter {
	s.FilterRules = v
	return s
}
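
// Example (illustrative sketch only): key filters pair with notification
// configurations; a FilterRule's Name is "prefix" or "suffix". Values below
// are placeholders.
//
//	filter := new(s3.KeyFilter).SetFilterRules([]*s3.FilterRule{
//		new(s3.FilterRule).SetName("prefix").SetValue("images/"),
//		new(s3.FilterRule).SetName("suffix").SetValue(".jpg"),
//	})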

// Container for specifying the AWS Lambda notification configuration.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LambdaFunctionConfiguration
type LambdaFunctionConfiguration struct {
	_ struct{} `type:"structure"`

	// Events is a required field
	Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"`

	// Container for object key name filtering rules. For information about key
	// name filtering, go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
	Filter *NotificationConfigurationFilter `type:"structure"`

	// Optional unique identifier for configurations in a notification configuration.
	// If you don't provide one, Amazon S3 will assign an ID.
	Id *string `type:"string"`

	// Lambda cloud function ARN that Amazon S3 can invoke when it detects events
	// of the specified type.
	//
	// LambdaFunctionArn is a required field
	LambdaFunctionArn *string `locationName:"CloudFunction" type:"string" required:"true"`
}

// String returns the string representation
func (s LambdaFunctionConfiguration) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s LambdaFunctionConfiguration) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *LambdaFunctionConfiguration) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "LambdaFunctionConfiguration"}
	if s.Events == nil {
		invalidParams.Add(request.NewErrParamRequired("Events"))
	}
	if s.LambdaFunctionArn == nil {
		invalidParams.Add(request.NewErrParamRequired("LambdaFunctionArn"))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetEvents sets the Events field's value.
func (s *LambdaFunctionConfiguration) SetEvents(v []*string) *LambdaFunctionConfiguration {
	s.Events = v
	return s
}

// SetFilter sets the Filter field's value.
func (s *LambdaFunctionConfiguration) SetFilter(v *NotificationConfigurationFilter) *LambdaFunctionConfiguration {
	s.Filter = v
	return s
}

// SetId sets the Id field's value.
func (s *LambdaFunctionConfiguration) SetId(v string) *LambdaFunctionConfiguration {
	s.Id = &v
	return s
}

// SetLambdaFunctionArn sets the LambdaFunctionArn field's value.
func (s *LambdaFunctionConfiguration) SetLambdaFunctionArn(v string) *LambdaFunctionConfiguration {
	s.LambdaFunctionArn = &v
	return s
}
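
// Example (illustrative sketch only): a Lambda notification entry for
// object-created events, reusing the key filter sketched above. The function
// ARN is a placeholder.
//
//	lc := new(s3.LambdaFunctionConfiguration).
//		SetLambdaFunctionArn("arn:aws:lambda:us-east-1:123456789012:function:my-fn"). // hypothetical ARN
//		SetEvents([]*string{aws.String("s3:ObjectCreated:*")}).
//		SetFilter(new(s3.NotificationConfigurationFilter).SetKey(filter))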

// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LifecycleConfiguration
type LifecycleConfiguration struct {
	_ struct{} `type:"structure"`

	// Rules is a required field
	Rules []*Rule `locationName:"Rule" type:"list" flattened:"true" required:"true"`
}

// String returns the string representation
func (s LifecycleConfiguration) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s LifecycleConfiguration) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *LifecycleConfiguration) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "LifecycleConfiguration"}
	if s.Rules == nil {
		invalidParams.Add(request.NewErrParamRequired("Rules"))
	}
	if s.Rules != nil {
		for i, v := range s.Rules {
			if v == nil {
				continue
			}
			if err := v.Validate(); err != nil {
				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams))
			}
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetRules sets the Rules field's value.
func (s *LifecycleConfiguration) SetRules(v []*Rule) *LifecycleConfiguration {
	s.Rules = v
	return s
}

// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LifecycleExpiration
type LifecycleExpiration struct {
	_ struct{} `type:"structure"`

	// Indicates at what date the object is to be moved or deleted. Should be in
	// GMT ISO 8601 Format.
	Date *time.Time `type:"timestamp" timestampFormat:"iso8601"`

	// Indicates the lifetime, in days, of the objects that are subject to the rule.
	// The value must be a non-zero positive integer.
	Days *int64 `type:"integer"`

	// Indicates whether Amazon S3 will remove a delete marker with no noncurrent
	// versions. If set to true, the delete marker will be expired; if set to false
	// the policy takes no action. This cannot be specified with Days or Date in
	// a Lifecycle Expiration Policy.
	ExpiredObjectDeleteMarker *bool `type:"boolean"`
}

// String returns the string representation
func (s LifecycleExpiration) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s LifecycleExpiration) GoString() string {
	return s.String()
}

// SetDate sets the Date field's value.
func (s *LifecycleExpiration) SetDate(v time.Time) *LifecycleExpiration {
	s.Date = &v
	return s
}

// SetDays sets the Days field's value.
func (s *LifecycleExpiration) SetDays(v int64) *LifecycleExpiration {
	s.Days = &v
	return s
}

// SetExpiredObjectDeleteMarker sets the ExpiredObjectDeleteMarker field's value.
func (s *LifecycleExpiration) SetExpiredObjectDeleteMarker(v bool) *LifecycleExpiration {
	s.ExpiredObjectDeleteMarker = &v
	return s
}
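
// Example (illustrative sketch only): per the field docs above,
// ExpiredObjectDeleteMarker cannot be combined with Days or Date, so a
// typical expiration action sets exactly one of them:
//
//	exp := new(s3.LifecycleExpiration).SetDays(30)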

// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LifecycleRule
type LifecycleRule struct {
	_ struct{} `type:"structure"`

	// Specifies the days since the initiation of an Incomplete Multipart Upload
	// that Lifecycle will wait before permanently removing all parts of the upload.
	AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `type:"structure"`

	Expiration *LifecycleExpiration `type:"structure"`

	// The Filter is used to identify objects that a Lifecycle Rule applies to.
	// A Filter must have exactly one of Prefix, Tag, or And specified.
	Filter *LifecycleRuleFilter `type:"structure"`

	// Unique identifier for the rule. The value cannot be longer than 255 characters.
	ID *string `type:"string"`

	// Specifies when noncurrent object versions expire. Upon expiration, Amazon
	// S3 permanently deletes the noncurrent object versions. You set this lifecycle
	// configuration action on a bucket that has versioning enabled (or suspended)
	// to request that Amazon S3 delete noncurrent object versions at a specific
	// period in the object's lifetime.
	NoncurrentVersionExpiration *NoncurrentVersionExpiration `type:"structure"`

	NoncurrentVersionTransitions []*NoncurrentVersionTransition `locationName:"NoncurrentVersionTransition" type:"list" flattened:"true"`

	// Prefix identifying one or more objects to which the rule applies. This is
	// deprecated; use Filter instead.
	Prefix *string `deprecated:"true" type:"string"`

	// If 'Enabled', the rule is currently being applied. If 'Disabled', the rule
	// is not currently being applied.
	//
	// Status is a required field
	Status *string `type:"string" required:"true" enum:"ExpirationStatus"`

	Transitions []*Transition `locationName:"Transition" type:"list" flattened:"true"`
}

// String returns the string representation
func (s LifecycleRule) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s LifecycleRule) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *LifecycleRule) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "LifecycleRule"}
	if s.Status == nil {
		invalidParams.Add(request.NewErrParamRequired("Status"))
	}
	if s.Filter != nil {
		if err := s.Filter.Validate(); err != nil {
			invalidParams.AddNested("Filter", err.(request.ErrInvalidParams))
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetAbortIncompleteMultipartUpload sets the AbortIncompleteMultipartUpload field's value.
func (s *LifecycleRule) SetAbortIncompleteMultipartUpload(v *AbortIncompleteMultipartUpload) *LifecycleRule {
	s.AbortIncompleteMultipartUpload = v
	return s
}

// SetExpiration sets the Expiration field's value.
func (s *LifecycleRule) SetExpiration(v *LifecycleExpiration) *LifecycleRule {
	s.Expiration = v
	return s
}

// SetFilter sets the Filter field's value.
func (s *LifecycleRule) SetFilter(v *LifecycleRuleFilter) *LifecycleRule {
	s.Filter = v
	return s
}

// SetID sets the ID field's value.
func (s *LifecycleRule) SetID(v string) *LifecycleRule {
	s.ID = &v
	return s
}

// SetNoncurrentVersionExpiration sets the NoncurrentVersionExpiration field's value.
func (s *LifecycleRule) SetNoncurrentVersionExpiration(v *NoncurrentVersionExpiration) *LifecycleRule {
	s.NoncurrentVersionExpiration = v
	return s
}

// SetNoncurrentVersionTransitions sets the NoncurrentVersionTransitions field's value.
func (s *LifecycleRule) SetNoncurrentVersionTransitions(v []*NoncurrentVersionTransition) *LifecycleRule {
	s.NoncurrentVersionTransitions = v
	return s
}

// SetPrefix sets the Prefix field's value.
func (s *LifecycleRule) SetPrefix(v string) *LifecycleRule {
	s.Prefix = &v
	return s
}

// SetStatus sets the Status field's value.
func (s *LifecycleRule) SetStatus(v string) *LifecycleRule {
	s.Status = &v
	return s
}

// SetTransitions sets the Transitions field's value.
func (s *LifecycleRule) SetTransitions(v []*Transition) *LifecycleRule {
	s.Transitions = v
	return s
}

// This is used in a Lifecycle Rule Filter to apply a logical AND to two or
// more predicates. The Lifecycle Rule will apply to any object matching all
// of the predicates configured inside the And operator.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LifecycleRuleAndOperator
type LifecycleRuleAndOperator struct {
	_ struct{} `type:"structure"`

	Prefix *string `type:"string"`

	// All of these tags must exist in the object's tag set in order for the rule
	// to apply.
	Tags []*Tag `locationName:"Tag" locationNameList:"Tag" type:"list" flattened:"true"`
}

// String returns the string representation
func (s LifecycleRuleAndOperator) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s LifecycleRuleAndOperator) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *LifecycleRuleAndOperator) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "LifecycleRuleAndOperator"}
	if s.Tags != nil {
		for i, v := range s.Tags {
			if v == nil {
				continue
			}
			if err := v.Validate(); err != nil {
				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams))
			}
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetPrefix sets the Prefix field's value.
func (s *LifecycleRuleAndOperator) SetPrefix(v string) *LifecycleRuleAndOperator {
	s.Prefix = &v
	return s
}

// SetTags sets the Tags field's value.
func (s *LifecycleRuleAndOperator) SetTags(v []*Tag) *LifecycleRuleAndOperator {
	s.Tags = v
	return s
}

// The Filter is used to identify objects that a Lifecycle Rule applies to.
// A Filter must have exactly one of Prefix, Tag, or And specified.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LifecycleRuleFilter
type LifecycleRuleFilter struct {
	_ struct{} `type:"structure"`

	// This is used in a Lifecycle Rule Filter to apply a logical AND to two or
	// more predicates. The Lifecycle Rule will apply to any object matching all
	// of the predicates configured inside the And operator.
	And *LifecycleRuleAndOperator `type:"structure"`

	// Prefix identifying one or more objects to which the rule applies.
	Prefix *string `type:"string"`

	// This tag must exist in the object's tag set in order for the rule to apply.
	Tag *Tag `type:"structure"`
}

// String returns the string representation
func (s LifecycleRuleFilter) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s LifecycleRuleFilter) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *LifecycleRuleFilter) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "LifecycleRuleFilter"}
	if s.And != nil {
		if err := s.And.Validate(); err != nil {
			invalidParams.AddNested("And", err.(request.ErrInvalidParams))
		}
	}
	if s.Tag != nil {
		if err := s.Tag.Validate(); err != nil {
			invalidParams.AddNested("Tag", err.(request.ErrInvalidParams))
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetAnd sets the And field's value.
func (s *LifecycleRuleFilter) SetAnd(v *LifecycleRuleAndOperator) *LifecycleRuleFilter {
	s.And = v
	return s
}

// SetPrefix sets the Prefix field's value.
func (s *LifecycleRuleFilter) SetPrefix(v string) *LifecycleRuleFilter {
	s.Prefix = &v
	return s
}

// SetTag sets the Tag field's value.
func (s *LifecycleRuleFilter) SetTag(v *Tag) *LifecycleRuleFilter {
	s.Tag = v
	return s
}
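
// Example (illustrative sketch only): a lifecycle rule that expires objects
// under a prefix after 30 days; exactly one of Prefix, Tag, or And is set on
// the filter. Names are placeholders; "Enabled" is an ExpirationStatus value.
//
//	rule := new(s3.LifecycleRule).
//		SetID("expire-logs").
//		SetStatus("Enabled").
//		SetFilter(new(s3.LifecycleRuleFilter).SetPrefix("logs/")).
//		SetExpiration(new(s3.LifecycleExpiration).SetDays(30))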

// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketAnalyticsConfigurationsRequest
type ListBucketAnalyticsConfigurationsInput struct {
	_ struct{} `type:"structure"`

	// The name of the bucket from which analytics configurations are retrieved.
	//
	// Bucket is a required field
	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`

	// The ContinuationToken that represents a placeholder from where this request
	// should begin.
	ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"`
}

// String returns the string representation
func (s ListBucketAnalyticsConfigurationsInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s ListBucketAnalyticsConfigurationsInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *ListBucketAnalyticsConfigurationsInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "ListBucketAnalyticsConfigurationsInput"}
	if s.Bucket == nil {
		invalidParams.Add(request.NewErrParamRequired("Bucket"))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetBucket sets the Bucket field's value.
func (s *ListBucketAnalyticsConfigurationsInput) SetBucket(v string) *ListBucketAnalyticsConfigurationsInput {
	s.Bucket = &v
	return s
}

// SetContinuationToken sets the ContinuationToken field's value.
func (s *ListBucketAnalyticsConfigurationsInput) SetContinuationToken(v string) *ListBucketAnalyticsConfigurationsInput {
	s.ContinuationToken = &v
	return s
}
12852
12853// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketAnalyticsConfigurationsOutput
12854type ListBucketAnalyticsConfigurationsOutput struct {
12855 _ struct{} `type:"structure"`
12856
12857 // The list of analytics configurations for a bucket.
12858 AnalyticsConfigurationList []*AnalyticsConfiguration `locationName:"AnalyticsConfiguration" type:"list" flattened:"true"`
12859
12860 // The ContinuationToken that represents where this request began.
12861 ContinuationToken *string `type:"string"`
12862
	// Indicates whether the returned list of analytics configurations is truncated.
	// A value of true indicates that the list is not complete and that NextContinuationToken
	// will be provided for a subsequent request.
	IsTruncated *bool `type:"boolean"`

	// NextContinuationToken is sent when IsTruncated is true, which indicates that
	// there are more analytics configurations to list. The next request must include
	// this NextContinuationToken. The token is obfuscated and is not a usable value.
	NextContinuationToken *string `type:"string"`
}

// String returns the string representation
func (s ListBucketAnalyticsConfigurationsOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s ListBucketAnalyticsConfigurationsOutput) GoString() string {
	return s.String()
}

// SetAnalyticsConfigurationList sets the AnalyticsConfigurationList field's value.
func (s *ListBucketAnalyticsConfigurationsOutput) SetAnalyticsConfigurationList(v []*AnalyticsConfiguration) *ListBucketAnalyticsConfigurationsOutput {
	s.AnalyticsConfigurationList = v
	return s
}

// SetContinuationToken sets the ContinuationToken field's value.
func (s *ListBucketAnalyticsConfigurationsOutput) SetContinuationToken(v string) *ListBucketAnalyticsConfigurationsOutput {
	s.ContinuationToken = &v
	return s
}

// SetIsTruncated sets the IsTruncated field's value.
func (s *ListBucketAnalyticsConfigurationsOutput) SetIsTruncated(v bool) *ListBucketAnalyticsConfigurationsOutput {
	s.IsTruncated = &v
	return s
}

// SetNextContinuationToken sets the NextContinuationToken field's value.
func (s *ListBucketAnalyticsConfigurationsOutput) SetNextContinuationToken(v string) *ListBucketAnalyticsConfigurationsOutput {
	s.NextContinuationToken = &v
	return s
}
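
// An illustrative sketch (not part of the generated API) of the pagination
// contract described above: while IsTruncated is true, feed
// NextContinuationToken back in as ContinuationToken. It assumes an *S3
// client from this package; the same pattern applies to the inventory and
// metrics configuration listings below.
func exampleListAllAnalyticsConfigurations(svc *S3, bucket string) ([]*AnalyticsConfiguration, error) {
	var configs []*AnalyticsConfiguration
	input := (&ListBucketAnalyticsConfigurationsInput{}).SetBucket(bucket)
	for {
		out, err := svc.ListBucketAnalyticsConfigurations(input)
		if err != nil {
			return nil, err
		}
		configs = append(configs, out.AnalyticsConfigurationList...)
		// A truncated response carries the token for the next page.
		if out.IsTruncated == nil || !*out.IsTruncated || out.NextContinuationToken == nil {
			return configs, nil
		}
		input.SetContinuationToken(*out.NextContinuationToken)
	}
}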

// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketInventoryConfigurationsRequest
type ListBucketInventoryConfigurationsInput struct {
	_ struct{} `type:"structure"`

	// The name of the bucket containing the inventory configurations to retrieve.
	//
	// Bucket is a required field
	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`

	// The marker used to continue an inventory configuration listing that has been
	// truncated. Use the NextContinuationToken from a previously truncated list
	// response to continue the listing. The continuation token is an opaque value
	// that Amazon S3 understands.
	ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"`
}

// String returns the string representation
func (s ListBucketInventoryConfigurationsInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s ListBucketInventoryConfigurationsInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *ListBucketInventoryConfigurationsInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "ListBucketInventoryConfigurationsInput"}
	if s.Bucket == nil {
		invalidParams.Add(request.NewErrParamRequired("Bucket"))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetBucket sets the Bucket field's value.
func (s *ListBucketInventoryConfigurationsInput) SetBucket(v string) *ListBucketInventoryConfigurationsInput {
	s.Bucket = &v
	return s
}

// SetContinuationToken sets the ContinuationToken field's value.
func (s *ListBucketInventoryConfigurationsInput) SetContinuationToken(v string) *ListBucketInventoryConfigurationsInput {
	s.ContinuationToken = &v
	return s
}

// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketInventoryConfigurationsOutput
type ListBucketInventoryConfigurationsOutput struct {
	_ struct{} `type:"structure"`

	// If sent in the request, the marker that is used as a starting point for this
	// inventory configuration list response.
	ContinuationToken *string `type:"string"`

	// The list of inventory configurations for a bucket.
	InventoryConfigurationList []*InventoryConfiguration `locationName:"InventoryConfiguration" type:"list" flattened:"true"`

	// Indicates whether the returned list of inventory configurations is truncated
	// in this response. A value of true indicates that the list is truncated.
	IsTruncated *bool `type:"boolean"`

	// The marker used to continue this inventory configuration listing. Use the
	// NextContinuationToken from this response to continue the listing in a subsequent
	// request. The continuation token is an opaque value that Amazon S3 understands.
	NextContinuationToken *string `type:"string"`
}

// String returns the string representation
func (s ListBucketInventoryConfigurationsOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s ListBucketInventoryConfigurationsOutput) GoString() string {
	return s.String()
}

// SetContinuationToken sets the ContinuationToken field's value.
func (s *ListBucketInventoryConfigurationsOutput) SetContinuationToken(v string) *ListBucketInventoryConfigurationsOutput {
	s.ContinuationToken = &v
	return s
}

// SetInventoryConfigurationList sets the InventoryConfigurationList field's value.
func (s *ListBucketInventoryConfigurationsOutput) SetInventoryConfigurationList(v []*InventoryConfiguration) *ListBucketInventoryConfigurationsOutput {
	s.InventoryConfigurationList = v
	return s
}

// SetIsTruncated sets the IsTruncated field's value.
func (s *ListBucketInventoryConfigurationsOutput) SetIsTruncated(v bool) *ListBucketInventoryConfigurationsOutput {
	s.IsTruncated = &v
	return s
}

// SetNextContinuationToken sets the NextContinuationToken field's value.
func (s *ListBucketInventoryConfigurationsOutput) SetNextContinuationToken(v string) *ListBucketInventoryConfigurationsOutput {
	s.NextContinuationToken = &v
	return s
}

// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketMetricsConfigurationsRequest
type ListBucketMetricsConfigurationsInput struct {
	_ struct{} `type:"structure"`

	// The name of the bucket containing the metrics configurations to retrieve.
	//
	// Bucket is a required field
	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`

	// The marker that is used to continue a metrics configuration listing that
	// has been truncated. Use the NextContinuationToken from a previously truncated
	// list response to continue the listing. The continuation token is an opaque
	// value that Amazon S3 understands.
	ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"`
}

// String returns the string representation
func (s ListBucketMetricsConfigurationsInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s ListBucketMetricsConfigurationsInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *ListBucketMetricsConfigurationsInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "ListBucketMetricsConfigurationsInput"}
	if s.Bucket == nil {
		invalidParams.Add(request.NewErrParamRequired("Bucket"))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetBucket sets the Bucket field's value.
func (s *ListBucketMetricsConfigurationsInput) SetBucket(v string) *ListBucketMetricsConfigurationsInput {
	s.Bucket = &v
	return s
}

// SetContinuationToken sets the ContinuationToken field's value.
func (s *ListBucketMetricsConfigurationsInput) SetContinuationToken(v string) *ListBucketMetricsConfigurationsInput {
	s.ContinuationToken = &v
	return s
}

// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketMetricsConfigurationsOutput
type ListBucketMetricsConfigurationsOutput struct {
	_ struct{} `type:"structure"`

	// The marker that is used as a starting point for this metrics configuration
	// list response. This value is present if it was sent in the request.
	ContinuationToken *string `type:"string"`

	// Indicates whether the returned list of metrics configurations is truncated.
	// A value of true indicates that the list is not complete and that NextContinuationToken
	// will be provided for a subsequent request.
	IsTruncated *bool `type:"boolean"`

	// The list of metrics configurations for a bucket.
	MetricsConfigurationList []*MetricsConfiguration `locationName:"MetricsConfiguration" type:"list" flattened:"true"`

	// The marker used to continue a metrics configuration listing that has been
	// truncated. Use the NextContinuationToken from a previously truncated list
	// response to continue the listing. The continuation token is an opaque value
	// that Amazon S3 understands.
	NextContinuationToken *string `type:"string"`
}

// String returns the string representation
func (s ListBucketMetricsConfigurationsOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s ListBucketMetricsConfigurationsOutput) GoString() string {
	return s.String()
}

// SetContinuationToken sets the ContinuationToken field's value.
func (s *ListBucketMetricsConfigurationsOutput) SetContinuationToken(v string) *ListBucketMetricsConfigurationsOutput {
	s.ContinuationToken = &v
	return s
}

// SetIsTruncated sets the IsTruncated field's value.
func (s *ListBucketMetricsConfigurationsOutput) SetIsTruncated(v bool) *ListBucketMetricsConfigurationsOutput {
	s.IsTruncated = &v
	return s
}

// SetMetricsConfigurationList sets the MetricsConfigurationList field's value.
func (s *ListBucketMetricsConfigurationsOutput) SetMetricsConfigurationList(v []*MetricsConfiguration) *ListBucketMetricsConfigurationsOutput {
	s.MetricsConfigurationList = v
	return s
}

// SetNextContinuationToken sets the NextContinuationToken field's value.
func (s *ListBucketMetricsConfigurationsOutput) SetNextContinuationToken(v string) *ListBucketMetricsConfigurationsOutput {
	s.NextContinuationToken = &v
	return s
}

// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketsInput
type ListBucketsInput struct {
	_ struct{} `type:"structure"`
}

// String returns the string representation
func (s ListBucketsInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s ListBucketsInput) GoString() string {
	return s.String()
}

// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketsOutput
type ListBucketsOutput struct {
	_ struct{} `type:"structure"`

	Buckets []*Bucket `locationNameList:"Bucket" type:"list"`

	Owner *Owner `type:"structure"`
}

// String returns the string representation
func (s ListBucketsOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s ListBucketsOutput) GoString() string {
	return s.String()
}

// SetBuckets sets the Buckets field's value.
func (s *ListBucketsOutput) SetBuckets(v []*Bucket) *ListBucketsOutput {
	s.Buckets = v
	return s
}

// SetOwner sets the Owner field's value.
func (s *ListBucketsOutput) SetOwner(v *Owner) *ListBucketsOutput {
	s.Owner = v
	return s
}

// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploadsRequest
type ListMultipartUploadsInput struct {
	_ struct{} `type:"structure"`

	// Bucket is a required field
	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`

	// Character you use to group keys.
	Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"`

	// Requests that Amazon S3 encode the object keys in the response and specifies
	// the encoding method to use. An object key may contain any Unicode character;
	// however, an XML 1.0 parser cannot parse some characters, such as characters
	// with an ASCII value from 0 to 10. For characters that are not supported in
	// XML 1.0, you can add this parameter to request that Amazon S3 encode the
	// keys in the response.
	EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"`

	// Together with upload-id-marker, this parameter specifies the multipart upload
	// after which listing should begin.
	KeyMarker *string `location:"querystring" locationName:"key-marker" type:"string"`

	// Sets the maximum number of multipart uploads, from 1 to 1,000, to return
	// in the response body. 1,000 is the maximum number of uploads that can be
	// returned in a response.
	MaxUploads *int64 `location:"querystring" locationName:"max-uploads" type:"integer"`

	// Lists in-progress uploads only for those keys that begin with the specified
	// prefix.
	Prefix *string `location:"querystring" locationName:"prefix" type:"string"`

	// Together with key-marker, specifies the multipart upload after which listing
	// should begin. If key-marker is not specified, the upload-id-marker parameter
	// is ignored.
	UploadIdMarker *string `location:"querystring" locationName:"upload-id-marker" type:"string"`
}

// String returns the string representation
func (s ListMultipartUploadsInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s ListMultipartUploadsInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *ListMultipartUploadsInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "ListMultipartUploadsInput"}
	if s.Bucket == nil {
		invalidParams.Add(request.NewErrParamRequired("Bucket"))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetBucket sets the Bucket field's value.
func (s *ListMultipartUploadsInput) SetBucket(v string) *ListMultipartUploadsInput {
	s.Bucket = &v
	return s
}

// SetDelimiter sets the Delimiter field's value.
func (s *ListMultipartUploadsInput) SetDelimiter(v string) *ListMultipartUploadsInput {
	s.Delimiter = &v
	return s
}

// SetEncodingType sets the EncodingType field's value.
func (s *ListMultipartUploadsInput) SetEncodingType(v string) *ListMultipartUploadsInput {
	s.EncodingType = &v
	return s
}

// SetKeyMarker sets the KeyMarker field's value.
func (s *ListMultipartUploadsInput) SetKeyMarker(v string) *ListMultipartUploadsInput {
	s.KeyMarker = &v
	return s
}

// SetMaxUploads sets the MaxUploads field's value.
func (s *ListMultipartUploadsInput) SetMaxUploads(v int64) *ListMultipartUploadsInput {
	s.MaxUploads = &v
	return s
}

// SetPrefix sets the Prefix field's value.
func (s *ListMultipartUploadsInput) SetPrefix(v string) *ListMultipartUploadsInput {
	s.Prefix = &v
	return s
}

// SetUploadIdMarker sets the UploadIdMarker field's value.
func (s *ListMultipartUploadsInput) SetUploadIdMarker(v string) *ListMultipartUploadsInput {
	s.UploadIdMarker = &v
	return s
}

// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploadsOutput
type ListMultipartUploadsOutput struct {
	_ struct{} `type:"structure"`

	// Name of the bucket to which the multipart upload was initiated.
	Bucket *string `type:"string"`

	CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"`

	Delimiter *string `type:"string"`

	// Encoding type used by Amazon S3 to encode object keys in the response.
	EncodingType *string `type:"string" enum:"EncodingType"`

	// Indicates whether the returned list of multipart uploads is truncated. A
	// value of true indicates that the list was truncated. The list can be truncated
	// if the number of multipart uploads exceeds the limit allowed or specified
	// by max uploads.
	IsTruncated *bool `type:"boolean"`

	// The key at or after which the listing began.
	KeyMarker *string `type:"string"`

	// Maximum number of multipart uploads that could have been included in the
	// response.
	MaxUploads *int64 `type:"integer"`

	// When a list is truncated, this element specifies the value that should be
	// used for the key-marker request parameter in a subsequent request.
	NextKeyMarker *string `type:"string"`

	// When a list is truncated, this element specifies the value that should be
	// used for the upload-id-marker request parameter in a subsequent request.
	NextUploadIdMarker *string `type:"string"`

	// When a prefix is provided in the request, this field contains the specified
	// prefix. The result contains only keys starting with the specified prefix.
	Prefix *string `type:"string"`

	// Upload ID after which listing began.
	UploadIdMarker *string `type:"string"`

	Uploads []*MultipartUpload `locationName:"Upload" type:"list" flattened:"true"`
}

// String returns the string representation
func (s ListMultipartUploadsOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s ListMultipartUploadsOutput) GoString() string {
	return s.String()
}

// SetBucket sets the Bucket field's value.
func (s *ListMultipartUploadsOutput) SetBucket(v string) *ListMultipartUploadsOutput {
	s.Bucket = &v
	return s
}

// SetCommonPrefixes sets the CommonPrefixes field's value.
func (s *ListMultipartUploadsOutput) SetCommonPrefixes(v []*CommonPrefix) *ListMultipartUploadsOutput {
	s.CommonPrefixes = v
	return s
}

// SetDelimiter sets the Delimiter field's value.
func (s *ListMultipartUploadsOutput) SetDelimiter(v string) *ListMultipartUploadsOutput {
	s.Delimiter = &v
	return s
}

// SetEncodingType sets the EncodingType field's value.
func (s *ListMultipartUploadsOutput) SetEncodingType(v string) *ListMultipartUploadsOutput {
	s.EncodingType = &v
	return s
}

// SetIsTruncated sets the IsTruncated field's value.
func (s *ListMultipartUploadsOutput) SetIsTruncated(v bool) *ListMultipartUploadsOutput {
	s.IsTruncated = &v
	return s
}

// SetKeyMarker sets the KeyMarker field's value.
func (s *ListMultipartUploadsOutput) SetKeyMarker(v string) *ListMultipartUploadsOutput {
	s.KeyMarker = &v
	return s
}

// SetMaxUploads sets the MaxUploads field's value.
func (s *ListMultipartUploadsOutput) SetMaxUploads(v int64) *ListMultipartUploadsOutput {
	s.MaxUploads = &v
	return s
}

// SetNextKeyMarker sets the NextKeyMarker field's value.
func (s *ListMultipartUploadsOutput) SetNextKeyMarker(v string) *ListMultipartUploadsOutput {
	s.NextKeyMarker = &v
	return s
}

// SetNextUploadIdMarker sets the NextUploadIdMarker field's value.
func (s *ListMultipartUploadsOutput) SetNextUploadIdMarker(v string) *ListMultipartUploadsOutput {
	s.NextUploadIdMarker = &v
	return s
}

// SetPrefix sets the Prefix field's value.
func (s *ListMultipartUploadsOutput) SetPrefix(v string) *ListMultipartUploadsOutput {
	s.Prefix = &v
	return s
}

// SetUploadIdMarker sets the UploadIdMarker field's value.
func (s *ListMultipartUploadsOutput) SetUploadIdMarker(v string) *ListMultipartUploadsOutput {
	s.UploadIdMarker = &v
	return s
}

// SetUploads sets the Uploads field's value.
func (s *ListMultipartUploadsOutput) SetUploads(v []*MultipartUpload) *ListMultipartUploadsOutput {
	s.Uploads = v
	return s
}
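
// An illustrative sketch (not part of the generated API) of the dual-marker
// pagination described above: NextKeyMarker and NextUploadIdMarker from a
// truncated response become key-marker and upload-id-marker on the next
// request. It assumes an *S3 client from this package.
func exampleListAllMultipartUploads(svc *S3, bucket string) ([]*MultipartUpload, error) {
	var uploads []*MultipartUpload
	input := (&ListMultipartUploadsInput{}).SetBucket(bucket)
	for {
		out, err := svc.ListMultipartUploads(input)
		if err != nil {
			return nil, err
		}
		uploads = append(uploads, out.Uploads...)
		if out.IsTruncated == nil || !*out.IsTruncated {
			return uploads, nil
		}
		// Both markers are needed to resume an upload listing.
		if out.NextKeyMarker != nil {
			input.SetKeyMarker(*out.NextKeyMarker)
		}
		if out.NextUploadIdMarker != nil {
			input.SetUploadIdMarker(*out.NextUploadIdMarker)
		}
	}
}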

// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersionsRequest
type ListObjectVersionsInput struct {
	_ struct{} `type:"structure"`

	// Bucket is a required field
	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`

	// A delimiter is a character you use to group keys.
	Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"`

	// Requests that Amazon S3 encode the object keys in the response and specifies
	// the encoding method to use. An object key may contain any Unicode character;
	// however, an XML 1.0 parser cannot parse some characters, such as characters
	// with an ASCII value from 0 to 10. For characters that are not supported in
	// XML 1.0, you can add this parameter to request that Amazon S3 encode the
	// keys in the response.
	EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"`

	// Specifies the key to start with when listing objects in a bucket.
	KeyMarker *string `location:"querystring" locationName:"key-marker" type:"string"`

	// Sets the maximum number of keys returned in the response. The response might
	// contain fewer keys but will never contain more.
	MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"`

	// Limits the response to keys that begin with the specified prefix.
	Prefix *string `location:"querystring" locationName:"prefix" type:"string"`

	// Specifies the object version you want to start listing from.
	VersionIdMarker *string `location:"querystring" locationName:"version-id-marker" type:"string"`
}

// String returns the string representation
func (s ListObjectVersionsInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s ListObjectVersionsInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *ListObjectVersionsInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "ListObjectVersionsInput"}
	if s.Bucket == nil {
		invalidParams.Add(request.NewErrParamRequired("Bucket"))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetBucket sets the Bucket field's value.
func (s *ListObjectVersionsInput) SetBucket(v string) *ListObjectVersionsInput {
	s.Bucket = &v
	return s
}

// SetDelimiter sets the Delimiter field's value.
func (s *ListObjectVersionsInput) SetDelimiter(v string) *ListObjectVersionsInput {
	s.Delimiter = &v
	return s
}

// SetEncodingType sets the EncodingType field's value.
func (s *ListObjectVersionsInput) SetEncodingType(v string) *ListObjectVersionsInput {
	s.EncodingType = &v
	return s
}

// SetKeyMarker sets the KeyMarker field's value.
func (s *ListObjectVersionsInput) SetKeyMarker(v string) *ListObjectVersionsInput {
	s.KeyMarker = &v
	return s
}

// SetMaxKeys sets the MaxKeys field's value.
func (s *ListObjectVersionsInput) SetMaxKeys(v int64) *ListObjectVersionsInput {
	s.MaxKeys = &v
	return s
}

// SetPrefix sets the Prefix field's value.
func (s *ListObjectVersionsInput) SetPrefix(v string) *ListObjectVersionsInput {
	s.Prefix = &v
	return s
}

// SetVersionIdMarker sets the VersionIdMarker field's value.
func (s *ListObjectVersionsInput) SetVersionIdMarker(v string) *ListObjectVersionsInput {
	s.VersionIdMarker = &v
	return s
}

// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersionsOutput
type ListObjectVersionsOutput struct {
	_ struct{} `type:"structure"`

	CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"`

	DeleteMarkers []*DeleteMarkerEntry `locationName:"DeleteMarker" type:"list" flattened:"true"`

	Delimiter *string `type:"string"`

	// Encoding type used by Amazon S3 to encode object keys in the response.
	EncodingType *string `type:"string" enum:"EncodingType"`

	// A flag that indicates whether or not Amazon S3 returned all of the results
	// that satisfied the search criteria. If your results were truncated, you can
	// make a follow-up paginated request using the NextKeyMarker and NextVersionIdMarker
	// response parameters as a starting place in another request to return the
	// rest of the results.
	IsTruncated *bool `type:"boolean"`

	// Marks the last Key returned in a truncated response.
	KeyMarker *string `type:"string"`

	MaxKeys *int64 `type:"integer"`

	Name *string `type:"string"`

	// Use this value for the key marker request parameter in a subsequent request.
	NextKeyMarker *string `type:"string"`

	// Use this value for the next version id marker parameter in a subsequent request.
	NextVersionIdMarker *string `type:"string"`

	Prefix *string `type:"string"`

	VersionIdMarker *string `type:"string"`

	Versions []*ObjectVersion `locationName:"Version" type:"list" flattened:"true"`
}

// String returns the string representation
func (s ListObjectVersionsOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s ListObjectVersionsOutput) GoString() string {
	return s.String()
}

// SetCommonPrefixes sets the CommonPrefixes field's value.
func (s *ListObjectVersionsOutput) SetCommonPrefixes(v []*CommonPrefix) *ListObjectVersionsOutput {
	s.CommonPrefixes = v
	return s
}

// SetDeleteMarkers sets the DeleteMarkers field's value.
func (s *ListObjectVersionsOutput) SetDeleteMarkers(v []*DeleteMarkerEntry) *ListObjectVersionsOutput {
	s.DeleteMarkers = v
	return s
}

// SetDelimiter sets the Delimiter field's value.
func (s *ListObjectVersionsOutput) SetDelimiter(v string) *ListObjectVersionsOutput {
	s.Delimiter = &v
	return s
}

// SetEncodingType sets the EncodingType field's value.
func (s *ListObjectVersionsOutput) SetEncodingType(v string) *ListObjectVersionsOutput {
	s.EncodingType = &v
	return s
}

// SetIsTruncated sets the IsTruncated field's value.
func (s *ListObjectVersionsOutput) SetIsTruncated(v bool) *ListObjectVersionsOutput {
	s.IsTruncated = &v
	return s
}

// SetKeyMarker sets the KeyMarker field's value.
func (s *ListObjectVersionsOutput) SetKeyMarker(v string) *ListObjectVersionsOutput {
	s.KeyMarker = &v
	return s
}

// SetMaxKeys sets the MaxKeys field's value.
func (s *ListObjectVersionsOutput) SetMaxKeys(v int64) *ListObjectVersionsOutput {
	s.MaxKeys = &v
	return s
}

// SetName sets the Name field's value.
func (s *ListObjectVersionsOutput) SetName(v string) *ListObjectVersionsOutput {
	s.Name = &v
	return s
}

// SetNextKeyMarker sets the NextKeyMarker field's value.
func (s *ListObjectVersionsOutput) SetNextKeyMarker(v string) *ListObjectVersionsOutput {
	s.NextKeyMarker = &v
	return s
}

// SetNextVersionIdMarker sets the NextVersionIdMarker field's value.
func (s *ListObjectVersionsOutput) SetNextVersionIdMarker(v string) *ListObjectVersionsOutput {
	s.NextVersionIdMarker = &v
	return s
}

// SetPrefix sets the Prefix field's value.
func (s *ListObjectVersionsOutput) SetPrefix(v string) *ListObjectVersionsOutput {
	s.Prefix = &v
	return s
}

// SetVersionIdMarker sets the VersionIdMarker field's value.
func (s *ListObjectVersionsOutput) SetVersionIdMarker(v string) *ListObjectVersionsOutput {
	s.VersionIdMarker = &v
	return s
}

// SetVersions sets the Versions field's value.
func (s *ListObjectVersionsOutput) SetVersions(v []*ObjectVersion) *ListObjectVersionsOutput {
	s.Versions = v
	return s
}
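
// An illustrative helper (not part of the generated API) showing how the
// Versions list above is typically consumed: pick out the versions that are
// currently the latest for their key, leaving delete markers and older
// versions aside.
func exampleLatestVersions(out *ListObjectVersionsOutput) []*ObjectVersion {
	var latest []*ObjectVersion
	for _, v := range out.Versions {
		if v != nil && v.IsLatest != nil && *v.IsLatest {
			latest = append(latest, v)
		}
	}
	return latest
}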

// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsRequest
type ListObjectsInput struct {
	_ struct{} `type:"structure"`

	// Bucket is a required field
	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`

	// A delimiter is a character you use to group keys.
	Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"`

	// Requests that Amazon S3 encode the object keys in the response and specifies
	// the encoding method to use. An object key may contain any Unicode character;
	// however, an XML 1.0 parser cannot parse some characters, such as characters
	// with an ASCII value from 0 to 10. For characters that are not supported in
	// XML 1.0, you can add this parameter to request that Amazon S3 encode the
	// keys in the response.
	EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"`

	// Specifies the key to start with when listing objects in a bucket.
	Marker *string `location:"querystring" locationName:"marker" type:"string"`

	// Sets the maximum number of keys returned in the response. The response might
	// contain fewer keys but will never contain more.
	MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"`

	// Limits the response to keys that begin with the specified prefix.
	Prefix *string `location:"querystring" locationName:"prefix" type:"string"`

	// Confirms that the requester knows that she or he will be charged for the
	// list objects request. Bucket owners need not specify this parameter in their
	// requests.
	RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
}

// String returns the string representation
func (s ListObjectsInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s ListObjectsInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *ListObjectsInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "ListObjectsInput"}
	if s.Bucket == nil {
		invalidParams.Add(request.NewErrParamRequired("Bucket"))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetBucket sets the Bucket field's value.
func (s *ListObjectsInput) SetBucket(v string) *ListObjectsInput {
	s.Bucket = &v
	return s
}

// SetDelimiter sets the Delimiter field's value.
func (s *ListObjectsInput) SetDelimiter(v string) *ListObjectsInput {
	s.Delimiter = &v
	return s
}

// SetEncodingType sets the EncodingType field's value.
func (s *ListObjectsInput) SetEncodingType(v string) *ListObjectsInput {
	s.EncodingType = &v
	return s
}

// SetMarker sets the Marker field's value.
func (s *ListObjectsInput) SetMarker(v string) *ListObjectsInput {
	s.Marker = &v
	return s
}

// SetMaxKeys sets the MaxKeys field's value.
func (s *ListObjectsInput) SetMaxKeys(v int64) *ListObjectsInput {
	s.MaxKeys = &v
	return s
}

// SetPrefix sets the Prefix field's value.
func (s *ListObjectsInput) SetPrefix(v string) *ListObjectsInput {
	s.Prefix = &v
	return s
}

// SetRequestPayer sets the RequestPayer field's value.
func (s *ListObjectsInput) SetRequestPayer(v string) *ListObjectsInput {
	s.RequestPayer = &v
	return s
}

// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsOutput
type ListObjectsOutput struct {
	_ struct{} `type:"structure"`

	CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"`

	Contents []*Object `type:"list" flattened:"true"`

	Delimiter *string `type:"string"`

	// Encoding type used by Amazon S3 to encode object keys in the response.
	EncodingType *string `type:"string" enum:"EncodingType"`

	// A flag that indicates whether or not Amazon S3 returned all of the results
	// that satisfied the search criteria.
	IsTruncated *bool `type:"boolean"`

	Marker *string `type:"string"`

	MaxKeys *int64 `type:"integer"`

	Name *string `type:"string"`

	// When the response is truncated (the IsTruncated element value in the response
	// is true), you can use the key name in this field as the marker in a subsequent
	// request to get the next set of objects. Amazon S3 lists objects in alphabetical
	// order. Note: this element is returned only if you specify the delimiter request
	// parameter. If the response is truncated and does not include NextMarker, you
	// can use the value of the last Key in the response as the marker in a
	// subsequent request to get the next set of object keys.
	NextMarker *string `type:"string"`

	Prefix *string `type:"string"`
}

// String returns the string representation
func (s ListObjectsOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s ListObjectsOutput) GoString() string {
	return s.String()
}

// SetCommonPrefixes sets the CommonPrefixes field's value.
func (s *ListObjectsOutput) SetCommonPrefixes(v []*CommonPrefix) *ListObjectsOutput {
	s.CommonPrefixes = v
	return s
}

// SetContents sets the Contents field's value.
func (s *ListObjectsOutput) SetContents(v []*Object) *ListObjectsOutput {
	s.Contents = v
	return s
}

// SetDelimiter sets the Delimiter field's value.
func (s *ListObjectsOutput) SetDelimiter(v string) *ListObjectsOutput {
	s.Delimiter = &v
	return s
}

// SetEncodingType sets the EncodingType field's value.
func (s *ListObjectsOutput) SetEncodingType(v string) *ListObjectsOutput {
	s.EncodingType = &v
	return s
}

// SetIsTruncated sets the IsTruncated field's value.
func (s *ListObjectsOutput) SetIsTruncated(v bool) *ListObjectsOutput {
	s.IsTruncated = &v
	return s
}

// SetMarker sets the Marker field's value.
func (s *ListObjectsOutput) SetMarker(v string) *ListObjectsOutput {
	s.Marker = &v
	return s
}

// SetMaxKeys sets the MaxKeys field's value.
func (s *ListObjectsOutput) SetMaxKeys(v int64) *ListObjectsOutput {
	s.MaxKeys = &v
	return s
}

// SetName sets the Name field's value.
func (s *ListObjectsOutput) SetName(v string) *ListObjectsOutput {
	s.Name = &v
	return s
}

// SetNextMarker sets the NextMarker field's value.
func (s *ListObjectsOutput) SetNextMarker(v string) *ListObjectsOutput {
	s.NextMarker = &v
	return s
}

// SetPrefix sets the Prefix field's value.
func (s *ListObjectsOutput) SetPrefix(v string) *ListObjectsOutput {
	s.Prefix = &v
	return s
}
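
// An illustrative sketch (not part of the generated API) of the marker rule
// documented on NextMarker above: prefer NextMarker when it is present, and
// otherwise fall back to the last Key of a truncated response. It assumes an
// *S3 client from this package.
func exampleListAllObjects(svc *S3, bucket string) ([]*Object, error) {
	var objects []*Object
	input := (&ListObjectsInput{}).SetBucket(bucket)
	for {
		out, err := svc.ListObjects(input)
		if err != nil {
			return nil, err
		}
		objects = append(objects, out.Contents...)
		if out.IsTruncated == nil || !*out.IsTruncated {
			return objects, nil
		}
		switch {
		case out.NextMarker != nil:
			input.SetMarker(*out.NextMarker)
		case len(out.Contents) > 0 && out.Contents[len(out.Contents)-1].Key != nil:
			// NextMarker is only returned when a delimiter was specified;
			// otherwise the last key of the page serves as the marker.
			input.SetMarker(*out.Contents[len(out.Contents)-1].Key)
		default:
			return objects, nil
		}
	}
}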

// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2Request
type ListObjectsV2Input struct {
	_ struct{} `type:"structure"`

	// Name of the bucket to list.
	//
	// Bucket is a required field
	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`

	// ContinuationToken indicates to Amazon S3 that the list is being continued
	// on this bucket with a token. ContinuationToken is obfuscated and is not a
	// real key.
	ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"`

	// A delimiter is a character you use to group keys.
	Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"`

	// Encoding type used by Amazon S3 to encode object keys in the response.
	EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"`

	// The owner field is not present in ListObjectsV2 responses by default. If
	// you want the owner field returned with each key in the result, set FetchOwner
	// to true.
	FetchOwner *bool `location:"querystring" locationName:"fetch-owner" type:"boolean"`

	// Sets the maximum number of keys returned in the response. The response might
	// contain fewer keys but will never contain more.
	MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"`

	// Limits the response to keys that begin with the specified prefix.
	Prefix *string `location:"querystring" locationName:"prefix" type:"string"`

	// Confirms that the requester knows that she or he will be charged for the
	// list objects request in V2 style. Bucket owners need not specify this parameter
	// in their requests.
	RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`

	// StartAfter is where you want Amazon S3 to start listing from. Amazon S3 starts
	// listing after this specified key. StartAfter can be any key in the bucket.
	StartAfter *string `location:"querystring" locationName:"start-after" type:"string"`
}

// String returns the string representation
func (s ListObjectsV2Input) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s ListObjectsV2Input) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *ListObjectsV2Input) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "ListObjectsV2Input"}
	if s.Bucket == nil {
		invalidParams.Add(request.NewErrParamRequired("Bucket"))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetBucket sets the Bucket field's value.
func (s *ListObjectsV2Input) SetBucket(v string) *ListObjectsV2Input {
	s.Bucket = &v
	return s
}

// SetContinuationToken sets the ContinuationToken field's value.
func (s *ListObjectsV2Input) SetContinuationToken(v string) *ListObjectsV2Input {
	s.ContinuationToken = &v
	return s
}

// SetDelimiter sets the Delimiter field's value.
func (s *ListObjectsV2Input) SetDelimiter(v string) *ListObjectsV2Input {
	s.Delimiter = &v
	return s
}

// SetEncodingType sets the EncodingType field's value.
func (s *ListObjectsV2Input) SetEncodingType(v string) *ListObjectsV2Input {
	s.EncodingType = &v
	return s
}

// SetFetchOwner sets the FetchOwner field's value.
func (s *ListObjectsV2Input) SetFetchOwner(v bool) *ListObjectsV2Input {
	s.FetchOwner = &v
	return s
}

// SetMaxKeys sets the MaxKeys field's value.
func (s *ListObjectsV2Input) SetMaxKeys(v int64) *ListObjectsV2Input {
	s.MaxKeys = &v
	return s
}

// SetPrefix sets the Prefix field's value.
func (s *ListObjectsV2Input) SetPrefix(v string) *ListObjectsV2Input {
	s.Prefix = &v
	return s
}

// SetRequestPayer sets the RequestPayer field's value.
func (s *ListObjectsV2Input) SetRequestPayer(v string) *ListObjectsV2Input {
	s.RequestPayer = &v
	return s
}

// SetStartAfter sets the StartAfter field's value.
func (s *ListObjectsV2Input) SetStartAfter(v string) *ListObjectsV2Input {
	s.StartAfter = &v
	return s
}

// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2Output
type ListObjectsV2Output struct {
	_ struct{} `type:"structure"`

	// CommonPrefixes contains all (if there are any) keys between Prefix and the
	// next occurrence of the string specified by delimiter
	CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"`

	// Metadata about each object returned.
	Contents []*Object `type:"list" flattened:"true"`

	// ContinuationToken indicates to Amazon S3 that the list is being continued
	// on this bucket with a token. ContinuationToken is obfuscated and is not a
	// real key.
	ContinuationToken *string `type:"string"`

	// A delimiter is a character you use to group keys.
	Delimiter *string `type:"string"`

	// Encoding type used by Amazon S3 to encode object keys in the response.
	EncodingType *string `type:"string" enum:"EncodingType"`

	// A flag that indicates whether or not Amazon S3 returned all of the results
	// that satisfied the search criteria.
	IsTruncated *bool `type:"boolean"`

	// KeyCount is the number of keys returned with this request. KeyCount will
	// always be less than or equal to the MaxKeys field. For example, if you ask
	// for 50 keys, your result will include at most 50 keys.
	KeyCount *int64 `type:"integer"`

	// Sets the maximum number of keys returned in the response. The response might
	// contain fewer keys but will never contain more.
	MaxKeys *int64 `type:"integer"`

	// Name of the bucket to list.
	Name *string `type:"string"`

	// NextContinuationToken is sent when IsTruncated is true, which means there
	// are more keys in the bucket that can be listed. The next list request to
	// Amazon S3 can be continued with this NextContinuationToken. NextContinuationToken
	// is obfuscated and is not a real key.
	NextContinuationToken *string `type:"string"`

	// Limits the response to keys that begin with the specified prefix.
	Prefix *string `type:"string"`

	// StartAfter is where you want Amazon S3 to start listing from. Amazon S3 starts
	// listing after this specified key. StartAfter can be any key in the bucket.
	StartAfter *string `type:"string"`
}

// String returns the string representation
func (s ListObjectsV2Output) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s ListObjectsV2Output) GoString() string {
	return s.String()
}

// SetCommonPrefixes sets the CommonPrefixes field's value.
func (s *ListObjectsV2Output) SetCommonPrefixes(v []*CommonPrefix) *ListObjectsV2Output {
	s.CommonPrefixes = v
	return s
}

// SetContents sets the Contents field's value.
func (s *ListObjectsV2Output) SetContents(v []*Object) *ListObjectsV2Output {
	s.Contents = v
	return s
}

// SetContinuationToken sets the ContinuationToken field's value.
func (s *ListObjectsV2Output) SetContinuationToken(v string) *ListObjectsV2Output {
	s.ContinuationToken = &v
	return s
}

// SetDelimiter sets the Delimiter field's value.
func (s *ListObjectsV2Output) SetDelimiter(v string) *ListObjectsV2Output {
	s.Delimiter = &v
	return s
}

// SetEncodingType sets the EncodingType field's value.
func (s *ListObjectsV2Output) SetEncodingType(v string) *ListObjectsV2Output {
	s.EncodingType = &v
	return s
}

// SetIsTruncated sets the IsTruncated field's value.
func (s *ListObjectsV2Output) SetIsTruncated(v bool) *ListObjectsV2Output {
	s.IsTruncated = &v
	return s
}

// SetKeyCount sets the KeyCount field's value.
func (s *ListObjectsV2Output) SetKeyCount(v int64) *ListObjectsV2Output {
	s.KeyCount = &v
	return s
}

// SetMaxKeys sets the MaxKeys field's value.
func (s *ListObjectsV2Output) SetMaxKeys(v int64) *ListObjectsV2Output {
	s.MaxKeys = &v
	return s
}

// SetName sets the Name field's value.
func (s *ListObjectsV2Output) SetName(v string) *ListObjectsV2Output {
	s.Name = &v
	return s
}

// SetNextContinuationToken sets the NextContinuationToken field's value.
func (s *ListObjectsV2Output) SetNextContinuationToken(v string) *ListObjectsV2Output {
	s.NextContinuationToken = &v
	return s
}

// SetPrefix sets the Prefix field's value.
func (s *ListObjectsV2Output) SetPrefix(v string) *ListObjectsV2Output {
	s.Prefix = &v
	return s
}

// SetStartAfter sets the StartAfter field's value.
func (s *ListObjectsV2Output) SetStartAfter(v string) *ListObjectsV2Output {
	s.StartAfter = &v
	return s
}
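
// An illustrative sketch (not part of the generated API) of the token-based
// pagination described above for ListObjectsV2: a truncated response's
// NextContinuationToken is passed back as ContinuationToken until IsTruncated
// is false. It assumes an *S3 client from this package.
func exampleListAllObjectsV2(svc *S3, bucket string) ([]*Object, error) {
	var objects []*Object
	input := (&ListObjectsV2Input{}).SetBucket(bucket)
	for {
		out, err := svc.ListObjectsV2(input)
		if err != nil {
			return nil, err
		}
		objects = append(objects, out.Contents...)
		if out.IsTruncated == nil || !*out.IsTruncated || out.NextContinuationToken == nil {
			return objects, nil
		}
		input.SetContinuationToken(*out.NextContinuationToken)
	}
}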

// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListPartsRequest
type ListPartsInput struct {
	_ struct{} `type:"structure"`

	// Bucket is a required field
	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`

	// Key is a required field
	Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`

	// Sets the maximum number of parts to return.
	MaxParts *int64 `location:"querystring" locationName:"max-parts" type:"integer"`

	// Specifies the part after which listing should begin. Only parts with higher
	// part numbers will be listed.
	PartNumberMarker *int64 `location:"querystring" locationName:"part-number-marker" type:"integer"`

	// Confirms that the requester knows that she or he will be charged for the
	// request. Bucket owners need not specify this parameter in their requests.
	// Documentation on downloading objects from requester pays buckets can be found
	// at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
	RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`

	// Upload ID identifying the multipart upload whose parts are being listed.
	//
	// UploadId is a required field
	UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"`
}

// String returns the string representation
func (s ListPartsInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s ListPartsInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *ListPartsInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "ListPartsInput"}
	if s.Bucket == nil {
		invalidParams.Add(request.NewErrParamRequired("Bucket"))
	}
	if s.Key == nil {
		invalidParams.Add(request.NewErrParamRequired("Key"))
	}
	if s.Key != nil && len(*s.Key) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("Key", 1))
	}
	if s.UploadId == nil {
		invalidParams.Add(request.NewErrParamRequired("UploadId"))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetBucket sets the Bucket field's value.
func (s *ListPartsInput) SetBucket(v string) *ListPartsInput {
	s.Bucket = &v
	return s
}

// SetKey sets the Key field's value.
func (s *ListPartsInput) SetKey(v string) *ListPartsInput {
	s.Key = &v
	return s
}

// SetMaxParts sets the MaxParts field's value.
func (s *ListPartsInput) SetMaxParts(v int64) *ListPartsInput {
	s.MaxParts = &v
	return s
}

// SetPartNumberMarker sets the PartNumberMarker field's value.
func (s *ListPartsInput) SetPartNumberMarker(v int64) *ListPartsInput {
	s.PartNumberMarker = &v
	return s
}

// SetRequestPayer sets the RequestPayer field's value.
func (s *ListPartsInput) SetRequestPayer(v string) *ListPartsInput {
	s.RequestPayer = &v
	return s
}

// SetUploadId sets the UploadId field's value.
func (s *ListPartsInput) SetUploadId(v string) *ListPartsInput {
	s.UploadId = &v
	return s
}

// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListPartsOutput
type ListPartsOutput struct {
	_ struct{} `type:"structure"`

	// Date when the multipart upload will become eligible for the abort operation
	// by lifecycle.
	AbortDate *time.Time `location:"header" locationName:"x-amz-abort-date" type:"timestamp" timestampFormat:"rfc822"`

	// ID of the lifecycle rule that makes the multipart upload eligible for the
	// abort operation.
	AbortRuleId *string `location:"header" locationName:"x-amz-abort-rule-id" type:"string"`

	// Name of the bucket to which the multipart upload was initiated.
	Bucket *string `type:"string"`

	// Identifies who initiated the multipart upload.
	Initiator *Initiator `type:"structure"`

	// Indicates whether the returned list of parts is truncated.
	IsTruncated *bool `type:"boolean"`

	// Object key for which the multipart upload was initiated.
	Key *string `min:"1" type:"string"`

	// Maximum number of parts that were allowed in the response.
	MaxParts *int64 `type:"integer"`

	// When a list is truncated, this element specifies the last part in the list,
	// as well as the value to use for the part-number-marker request parameter
	// in a subsequent request.
	NextPartNumberMarker *int64 `type:"integer"`

	Owner *Owner `type:"structure"`

	// Part number after which listing begins.
	PartNumberMarker *int64 `type:"integer"`

	Parts []*Part `locationName:"Part" type:"list" flattened:"true"`

	// If present, indicates that the requester was successfully charged for the
	// request.
	RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`

	// The class of storage used to store the object.
	StorageClass *string `type:"string" enum:"StorageClass"`

	// Upload ID identifying the multipart upload whose parts are being listed.
	UploadId *string `type:"string"`
}

// String returns the string representation
func (s ListPartsOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s ListPartsOutput) GoString() string {
	return s.String()
}

// SetAbortDate sets the AbortDate field's value.
func (s *ListPartsOutput) SetAbortDate(v time.Time) *ListPartsOutput {
	s.AbortDate = &v
	return s
}

// SetAbortRuleId sets the AbortRuleId field's value.
func (s *ListPartsOutput) SetAbortRuleId(v string) *ListPartsOutput {
	s.AbortRuleId = &v
	return s
}

// SetBucket sets the Bucket field's value.
func (s *ListPartsOutput) SetBucket(v string) *ListPartsOutput {
	s.Bucket = &v
	return s
}

// SetInitiator sets the Initiator field's value.
func (s *ListPartsOutput) SetInitiator(v *Initiator) *ListPartsOutput {
	s.Initiator = v
	return s
}

// SetIsTruncated sets the IsTruncated field's value.
func (s *ListPartsOutput) SetIsTruncated(v bool) *ListPartsOutput {
	s.IsTruncated = &v
	return s
}

// SetKey sets the Key field's value.
func (s *ListPartsOutput) SetKey(v string) *ListPartsOutput {
	s.Key = &v
	return s
}

// SetMaxParts sets the MaxParts field's value.
func (s *ListPartsOutput) SetMaxParts(v int64) *ListPartsOutput {
	s.MaxParts = &v
	return s
}

// SetNextPartNumberMarker sets the NextPartNumberMarker field's value.
func (s *ListPartsOutput) SetNextPartNumberMarker(v int64) *ListPartsOutput {
	s.NextPartNumberMarker = &v
	return s
}

// SetOwner sets the Owner field's value.
func (s *ListPartsOutput) SetOwner(v *Owner) *ListPartsOutput {
	s.Owner = v
	return s
}

// SetPartNumberMarker sets the PartNumberMarker field's value.
func (s *ListPartsOutput) SetPartNumberMarker(v int64) *ListPartsOutput {
	s.PartNumberMarker = &v
	return s
}

// SetParts sets the Parts field's value.
func (s *ListPartsOutput) SetParts(v []*Part) *ListPartsOutput {
	s.Parts = v
	return s
}

// SetRequestCharged sets the RequestCharged field's value.
func (s *ListPartsOutput) SetRequestCharged(v string) *ListPartsOutput {
	s.RequestCharged = &v
	return s
}

// SetStorageClass sets the StorageClass field's value.
func (s *ListPartsOutput) SetStorageClass(v string) *ListPartsOutput {
	s.StorageClass = &v
	return s
}

// SetUploadId sets the UploadId field's value.
func (s *ListPartsOutput) SetUploadId(v string) *ListPartsOutput {
	s.UploadId = &v
	return s
}
14318
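// exampleListAllPartsSketch is an illustrative sketch, not part of the
// generated API: it shows how the ListPartsOutput fields above drive
// pagination, feeding NextPartNumberMarker back in as the next request's
// PartNumberMarker until IsTruncated is false. The bucket and key names are
// hypothetical.
func exampleListAllPartsSketch(svc *S3, uploadID string) ([]*Part, error) {
	in := &ListPartsInput{
		Bucket:   aws.String("example-bucket"), // hypothetical bucket
		Key:      aws.String("example-key"),    // hypothetical key
		UploadId: aws.String(uploadID),
	}
	var parts []*Part
	for {
		out, err := svc.ListParts(in)
		if err != nil {
			return nil, err
		}
		parts = append(parts, out.Parts...)
		if out.IsTruncated == nil || !*out.IsTruncated {
			return parts, nil
		}
		// Continue the listing from the marker the service returned.
		in.PartNumberMarker = out.NextPartNumberMarker
	}
}
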
14319// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LoggingEnabled
14320type LoggingEnabled struct {
14321 _ struct{} `type:"structure"`
14322
14323 // Specifies the bucket where you want Amazon S3 to store server access logs.
14324 // You can have your logs delivered to any bucket that you own, including the
14325 // same bucket that is being logged. You can also configure multiple buckets
14326 // to deliver their logs to the same target bucket. In this case you should
14327 // choose a different TargetPrefix for each source bucket so that the delivered
14328 // log files can be distinguished by key.
14329 TargetBucket *string `type:"string"`
14330
14331 TargetGrants []*TargetGrant `locationNameList:"Grant" type:"list"`
14332
14333 // This element lets you specify a prefix for the keys that the log files will
14334 // be stored under.
14335 TargetPrefix *string `type:"string"`
14336}
14337
14338// String returns the string representation
14339func (s LoggingEnabled) String() string {
14340 return awsutil.Prettify(s)
14341}
14342
14343// GoString returns the string representation
14344func (s LoggingEnabled) GoString() string {
14345 return s.String()
14346}
14347
14348// Validate inspects the fields of the type to determine if they are valid.
14349func (s *LoggingEnabled) Validate() error {
14350 invalidParams := request.ErrInvalidParams{Context: "LoggingEnabled"}
14351 if s.TargetGrants != nil {
14352 for i, v := range s.TargetGrants {
14353 if v == nil {
14354 continue
14355 }
14356 if err := v.Validate(); err != nil {
14357 invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TargetGrants", i), err.(request.ErrInvalidParams))
14358 }
14359 }
14360 }
14361
14362 if invalidParams.Len() > 0 {
14363 return invalidParams
14364 }
14365 return nil
14366}
14367
14368// SetTargetBucket sets the TargetBucket field's value.
14369func (s *LoggingEnabled) SetTargetBucket(v string) *LoggingEnabled {
14370 s.TargetBucket = &v
14371 return s
14372}
14373
14374// SetTargetGrants sets the TargetGrants field's value.
14375func (s *LoggingEnabled) SetTargetGrants(v []*TargetGrant) *LoggingEnabled {
14376 s.TargetGrants = v
14377 return s
14378}
14379
14380// SetTargetPrefix sets the TargetPrefix field's value.
14381func (s *LoggingEnabled) SetTargetPrefix(v string) *LoggingEnabled {
14382 s.TargetPrefix = &v
14383 return s
14384}
14385
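// exampleLoggingEnabledSketch is an illustrative sketch, not part of the
// generated API: it builds the LoggingEnabled element that PutBucketLogging
// expects inside a BucketLoggingStatus. Both bucket names and the prefix are
// hypothetical; a per-source TargetPrefix keeps delivered log keys
// distinguishable when several buckets share one target, as the doc comment
// above recommends.
func exampleLoggingEnabledSketch() *BucketLoggingStatus {
	return &BucketLoggingStatus{
		LoggingEnabled: (&LoggingEnabled{}).
			SetTargetBucket("example-log-bucket").
			SetTargetPrefix("logs/example-source-bucket/"),
	}
}
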
14386// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/MetricsAndOperator
14387type MetricsAndOperator struct {
14388 _ struct{} `type:"structure"`
14389
14390 // The prefix used when evaluating an AND predicate.
14391 Prefix *string `type:"string"`
14392
14393 // The list of tags used when evaluating an AND predicate.
14394 Tags []*Tag `locationName:"Tag" locationNameList:"Tag" type:"list" flattened:"true"`
14395}
14396
14397// String returns the string representation
14398func (s MetricsAndOperator) String() string {
14399 return awsutil.Prettify(s)
14400}
14401
14402// GoString returns the string representation
14403func (s MetricsAndOperator) GoString() string {
14404 return s.String()
14405}
14406
14407// Validate inspects the fields of the type to determine if they are valid.
14408func (s *MetricsAndOperator) Validate() error {
14409 invalidParams := request.ErrInvalidParams{Context: "MetricsAndOperator"}
14410 if s.Tags != nil {
14411 for i, v := range s.Tags {
14412 if v == nil {
14413 continue
14414 }
14415 if err := v.Validate(); err != nil {
14416 invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams))
14417 }
14418 }
14419 }
14420
14421 if invalidParams.Len() > 0 {
14422 return invalidParams
14423 }
14424 return nil
14425}
14426
14427// SetPrefix sets the Prefix field's value.
14428func (s *MetricsAndOperator) SetPrefix(v string) *MetricsAndOperator {
14429 s.Prefix = &v
14430 return s
14431}
14432
14433// SetTags sets the Tags field's value.
14434func (s *MetricsAndOperator) SetTags(v []*Tag) *MetricsAndOperator {
14435 s.Tags = v
14436 return s
14437}
14438
14439// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/MetricsConfiguration
14440type MetricsConfiguration struct {
14441 _ struct{} `type:"structure"`
14442
14443 // Specifies a metrics configuration filter. The metrics configuration will
14444 // only include objects that meet the filter's criteria. A filter must be a
14445 // prefix, a tag, or a conjunction (MetricsAndOperator).
14446 Filter *MetricsFilter `type:"structure"`
14447
14448 // The ID used to identify the metrics configuration.
14449 //
14450 // Id is a required field
14451 Id *string `type:"string" required:"true"`
14452}
14453
14454// String returns the string representation
14455func (s MetricsConfiguration) String() string {
14456 return awsutil.Prettify(s)
14457}
14458
14459// GoString returns the string representation
14460func (s MetricsConfiguration) GoString() string {
14461 return s.String()
14462}
14463
14464// Validate inspects the fields of the type to determine if they are valid.
14465func (s *MetricsConfiguration) Validate() error {
14466 invalidParams := request.ErrInvalidParams{Context: "MetricsConfiguration"}
14467 if s.Id == nil {
14468 invalidParams.Add(request.NewErrParamRequired("Id"))
14469 }
14470 if s.Filter != nil {
14471 if err := s.Filter.Validate(); err != nil {
14472 invalidParams.AddNested("Filter", err.(request.ErrInvalidParams))
14473 }
14474 }
14475
14476 if invalidParams.Len() > 0 {
14477 return invalidParams
14478 }
14479 return nil
14480}
14481
14482// SetFilter sets the Filter field's value.
14483func (s *MetricsConfiguration) SetFilter(v *MetricsFilter) *MetricsConfiguration {
14484 s.Filter = v
14485 return s
14486}
14487
14488// SetId sets the Id field's value.
14489func (s *MetricsConfiguration) SetId(v string) *MetricsConfiguration {
14490 s.Id = &v
14491 return s
14492}
14493
14494// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/MetricsFilter
14495type MetricsFilter struct {
14496 _ struct{} `type:"structure"`
14497
14498 // A conjunction (logical AND) of predicates, which is used in evaluating a
14499 // metrics filter. The operator must have at least two predicates, and an object
14500 // must match all of the predicates in order for the filter to apply.
14501 And *MetricsAndOperator `type:"structure"`
14502
14503 // The prefix used when evaluating a metrics filter.
14504 Prefix *string `type:"string"`
14505
14506 // The tag used when evaluating a metrics filter.
14507 Tag *Tag `type:"structure"`
14508}
14509
14510// String returns the string representation
14511func (s MetricsFilter) String() string {
14512 return awsutil.Prettify(s)
14513}
14514
14515// GoString returns the string representation
14516func (s MetricsFilter) GoString() string {
14517 return s.String()
14518}
14519
14520// Validate inspects the fields of the type to determine if they are valid.
14521func (s *MetricsFilter) Validate() error {
14522 invalidParams := request.ErrInvalidParams{Context: "MetricsFilter"}
14523 if s.And != nil {
14524 if err := s.And.Validate(); err != nil {
14525 invalidParams.AddNested("And", err.(request.ErrInvalidParams))
14526 }
14527 }
14528 if s.Tag != nil {
14529 if err := s.Tag.Validate(); err != nil {
14530 invalidParams.AddNested("Tag", err.(request.ErrInvalidParams))
14531 }
14532 }
14533
14534 if invalidParams.Len() > 0 {
14535 return invalidParams
14536 }
14537 return nil
14538}
14539
14540// SetAnd sets the And field's value.
14541func (s *MetricsFilter) SetAnd(v *MetricsAndOperator) *MetricsFilter {
14542 s.And = v
14543 return s
14544}
14545
14546// SetPrefix sets the Prefix field's value.
14547func (s *MetricsFilter) SetPrefix(v string) *MetricsFilter {
14548 s.Prefix = &v
14549 return s
14550}
14551
14552// SetTag sets the Tag field's value.
14553func (s *MetricsFilter) SetTag(v *Tag) *MetricsFilter {
14554 s.Tag = v
14555 return s
14556}
14557
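// exampleMetricsConfigurationSketch is an illustrative sketch, not part of
// the generated API: it assembles a MetricsConfiguration whose filter is a
// conjunction of a prefix and a tag, which is exactly the shape
// MetricsAndOperator exists for. The ID, prefix, and tag values are
// hypothetical.
func exampleMetricsConfigurationSketch() *MetricsConfiguration {
	and := (&MetricsAndOperator{}).
		SetPrefix("photos/").
		SetTags([]*Tag{
			(&Tag{}).SetKey("team").SetValue("media"), // hypothetical tag
		})
	return (&MetricsConfiguration{}).
		SetId("photos-media-metrics").
		SetFilter((&MetricsFilter{}).SetAnd(and))
}
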
14558// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/MultipartUpload
14559type MultipartUpload struct {
14560 _ struct{} `type:"structure"`
14561
14562 // Date and time at which the multipart upload was initiated.
14563 Initiated *time.Time `type:"timestamp" timestampFormat:"iso8601"`
14564
14565 // Identifies who initiated the multipart upload.
14566 Initiator *Initiator `type:"structure"`
14567
14568 // Key of the object for which the multipart upload was initiated.
14569 Key *string `min:"1" type:"string"`
14570
14571 Owner *Owner `type:"structure"`
14572
14573 // The class of storage used to store the object.
14574 StorageClass *string `type:"string" enum:"StorageClass"`
14575
14576 // Upload ID that identifies the multipart upload.
14577 UploadId *string `type:"string"`
14578}
14579
14580// String returns the string representation
14581func (s MultipartUpload) String() string {
14582 return awsutil.Prettify(s)
14583}
14584
14585// GoString returns the string representation
14586func (s MultipartUpload) GoString() string {
14587 return s.String()
14588}
14589
14590// SetInitiated sets the Initiated field's value.
14591func (s *MultipartUpload) SetInitiated(v time.Time) *MultipartUpload {
14592 s.Initiated = &v
14593 return s
14594}
14595
14596// SetInitiator sets the Initiator field's value.
14597func (s *MultipartUpload) SetInitiator(v *Initiator) *MultipartUpload {
14598 s.Initiator = v
14599 return s
14600}
14601
14602// SetKey sets the Key field's value.
14603func (s *MultipartUpload) SetKey(v string) *MultipartUpload {
14604 s.Key = &v
14605 return s
14606}
14607
14608// SetOwner sets the Owner field's value.
14609func (s *MultipartUpload) SetOwner(v *Owner) *MultipartUpload {
14610 s.Owner = v
14611 return s
14612}
14613
14614// SetStorageClass sets the StorageClass field's value.
14615func (s *MultipartUpload) SetStorageClass(v string) *MultipartUpload {
14616 s.StorageClass = &v
14617 return s
14618}
14619
14620// SetUploadId sets the UploadId field's value.
14621func (s *MultipartUpload) SetUploadId(v string) *MultipartUpload {
14622 s.UploadId = &v
14623 return s
14624}
14625
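// exampleAbortStaleUploadsSketch is an illustrative sketch, not part of the
// generated API: it shows that each MultipartUpload element returned by
// ListMultipartUploads carries enough state (Key, UploadId, Initiated) to
// abort uploads that were started but never completed. The bucket name and
// the 24-hour cutoff are hypothetical.
func exampleAbortStaleUploadsSketch(svc *S3, uploads []*MultipartUpload) error {
	cutoff := time.Now().Add(-24 * time.Hour) // hypothetical staleness cutoff
	for _, u := range uploads {
		if u.Initiated == nil || u.Initiated.After(cutoff) {
			continue // still recent enough to leave alone
		}
		_, err := svc.AbortMultipartUpload(&AbortMultipartUploadInput{
			Bucket:   aws.String("example-bucket"), // hypothetical bucket
			Key:      u.Key,
			UploadId: u.UploadId,
		})
		if err != nil {
			return err
		}
	}
	return nil
}
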
14626// Specifies when noncurrent object versions expire. Upon expiration, Amazon
14627// S3 permanently deletes the noncurrent object versions. You set this lifecycle
14628// configuration action on a bucket that has versioning enabled (or suspended)
14629// to request that Amazon S3 delete noncurrent object versions at a specific
14630// period in the object's lifetime.
14631// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/NoncurrentVersionExpiration
14632type NoncurrentVersionExpiration struct {
14633 _ struct{} `type:"structure"`
14634
14635 // Specifies the number of days an object is noncurrent before Amazon S3 can
14636 // perform the associated action. For information about the noncurrent days
14637 // calculations, see How Amazon S3 Calculates When an Object Became Noncurrent
14638 // (http://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html)
14639 NoncurrentDays *int64 `type:"integer"`
14640}
14641
14642// String returns the string representation
14643func (s NoncurrentVersionExpiration) String() string {
14644 return awsutil.Prettify(s)
14645}
14646
14647// GoString returns the string representation
14648func (s NoncurrentVersionExpiration) GoString() string {
14649 return s.String()
14650}
14651
14652// SetNoncurrentDays sets the NoncurrentDays field's value.
14653func (s *NoncurrentVersionExpiration) SetNoncurrentDays(v int64) *NoncurrentVersionExpiration {
14654 s.NoncurrentDays = &v
14655 return s
14656}
14657
14658// Container for the transition rule that describes when noncurrent objects
14659// transition to the STANDARD_IA or GLACIER storage class. If your bucket is
14660// versioning-enabled (or versioning is suspended), you can set this action
14661// to request that Amazon S3 transition noncurrent object versions to the STANDARD_IA
14662// or GLACIER storage class at a specific period in the object's lifetime.
14663// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/NoncurrentVersionTransition
14664type NoncurrentVersionTransition struct {
14665 _ struct{} `type:"structure"`
14666
14667 // Specifies the number of days an object is noncurrent before Amazon S3 can
14668 // perform the associated action. For information about the noncurrent days
14669 // calculations, see How Amazon S3 Calculates When an Object Became Noncurrent
14670 // (http://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html)
14671 NoncurrentDays *int64 `type:"integer"`
14672
14673 // The class of storage used to store the object.
14674 StorageClass *string `type:"string" enum:"TransitionStorageClass"`
14675}
14676
14677// String returns the string representation
14678func (s NoncurrentVersionTransition) String() string {
14679 return awsutil.Prettify(s)
14680}
14681
14682// GoString returns the string representation
14683func (s NoncurrentVersionTransition) GoString() string {
14684 return s.String()
14685}
14686
14687// SetNoncurrentDays sets the NoncurrentDays field's value.
14688func (s *NoncurrentVersionTransition) SetNoncurrentDays(v int64) *NoncurrentVersionTransition {
14689 s.NoncurrentDays = &v
14690 return s
14691}
14692
14693// SetStorageClass sets the StorageClass field's value.
14694func (s *NoncurrentVersionTransition) SetStorageClass(v string) *NoncurrentVersionTransition {
14695 s.StorageClass = &v
14696 return s
14697}
14698
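// exampleNoncurrentLifecycleSketch is an illustrative sketch, not part of the
// generated API: it pairs NoncurrentVersionTransition and
// NoncurrentVersionExpiration inside a lifecycle rule so that noncurrent
// versions move to GLACIER after 30 noncurrent days and are deleted after
// 365. It assumes the LifecycleRule type generated elsewhere in this file;
// the rule ID and prefix are hypothetical.
func exampleNoncurrentLifecycleSketch() *LifecycleRule {
	return &LifecycleRule{
		ID:     aws.String("noncurrent-cleanup"), // hypothetical rule ID
		Prefix: aws.String("archive/"),           // hypothetical key prefix
		Status: aws.String("Enabled"),
		NoncurrentVersionTransitions: []*NoncurrentVersionTransition{
			(&NoncurrentVersionTransition{}).
				SetNoncurrentDays(30).
				SetStorageClass("GLACIER"),
		},
		NoncurrentVersionExpiration: (&NoncurrentVersionExpiration{}).
			SetNoncurrentDays(365),
	}
}
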
14699// Container for specifying the notification configuration of the bucket. If
14700// this element is empty, notifications are turned off on the bucket.
14701// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/NotificationConfiguration
14702type NotificationConfiguration struct {
14703 _ struct{} `type:"structure"`
14704
14705 LambdaFunctionConfigurations []*LambdaFunctionConfiguration `locationName:"CloudFunctionConfiguration" type:"list" flattened:"true"`
14706
14707 QueueConfigurations []*QueueConfiguration `locationName:"QueueConfiguration" type:"list" flattened:"true"`
14708
14709 TopicConfigurations []*TopicConfiguration `locationName:"TopicConfiguration" type:"list" flattened:"true"`
14710}
14711
14712// String returns the string representation
14713func (s NotificationConfiguration) String() string {
14714 return awsutil.Prettify(s)
14715}
14716
14717// GoString returns the string representation
14718func (s NotificationConfiguration) GoString() string {
14719 return s.String()
14720}
14721
14722// Validate inspects the fields of the type to determine if they are valid.
14723func (s *NotificationConfiguration) Validate() error {
14724 invalidParams := request.ErrInvalidParams{Context: "NotificationConfiguration"}
14725 if s.LambdaFunctionConfigurations != nil {
14726 for i, v := range s.LambdaFunctionConfigurations {
14727 if v == nil {
14728 continue
14729 }
14730 if err := v.Validate(); err != nil {
14731 invalidParams.AddNested(fmt.Sprintf("%s[%v]", "LambdaFunctionConfigurations", i), err.(request.ErrInvalidParams))
14732 }
14733 }
14734 }
14735 if s.QueueConfigurations != nil {
14736 for i, v := range s.QueueConfigurations {
14737 if v == nil {
14738 continue
14739 }
14740 if err := v.Validate(); err != nil {
14741 invalidParams.AddNested(fmt.Sprintf("%s[%v]", "QueueConfigurations", i), err.(request.ErrInvalidParams))
14742 }
14743 }
14744 }
14745 if s.TopicConfigurations != nil {
14746 for i, v := range s.TopicConfigurations {
14747 if v == nil {
14748 continue
14749 }
14750 if err := v.Validate(); err != nil {
14751 invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TopicConfigurations", i), err.(request.ErrInvalidParams))
14752 }
14753 }
14754 }
14755
14756 if invalidParams.Len() > 0 {
14757 return invalidParams
14758 }
14759 return nil
14760}
14761
14762// SetLambdaFunctionConfigurations sets the LambdaFunctionConfigurations field's value.
14763func (s *NotificationConfiguration) SetLambdaFunctionConfigurations(v []*LambdaFunctionConfiguration) *NotificationConfiguration {
14764 s.LambdaFunctionConfigurations = v
14765 return s
14766}
14767
14768// SetQueueConfigurations sets the QueueConfigurations field's value.
14769func (s *NotificationConfiguration) SetQueueConfigurations(v []*QueueConfiguration) *NotificationConfiguration {
14770 s.QueueConfigurations = v
14771 return s
14772}
14773
14774// SetTopicConfigurations sets the TopicConfigurations field's value.
14775func (s *NotificationConfiguration) SetTopicConfigurations(v []*TopicConfiguration) *NotificationConfiguration {
14776 s.TopicConfigurations = v
14777 return s
14778}
14779
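// exampleNotificationConfigurationSketch is an illustrative sketch, not part
// of the generated API: it wires a single QueueConfiguration into the
// container and runs the same Validate that the Put handlers invoke. The
// queue ARN and event name are hypothetical.
func exampleNotificationConfigurationSketch() (*NotificationConfiguration, error) {
	cfg := &NotificationConfiguration{
		QueueConfigurations: []*QueueConfiguration{
			(&QueueConfiguration{}).
				SetQueueArn("arn:aws:sqs:us-east-1:123456789012:example-queue").
				SetEvents([]*string{aws.String("s3:ObjectCreated:*")}),
		},
	}
	if err := cfg.Validate(); err != nil {
		return nil, err
	}
	return cfg, nil
}
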
14780// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/NotificationConfigurationDeprecated
14781type NotificationConfigurationDeprecated struct {
14782 _ struct{} `type:"structure"`
14783
14784 CloudFunctionConfiguration *CloudFunctionConfiguration `type:"structure"`
14785
14786 QueueConfiguration *QueueConfigurationDeprecated `type:"structure"`
14787
14788 TopicConfiguration *TopicConfigurationDeprecated `type:"structure"`
14789}
14790
14791// String returns the string representation
14792func (s NotificationConfigurationDeprecated) String() string {
14793 return awsutil.Prettify(s)
14794}
14795
14796// GoString returns the string representation
14797func (s NotificationConfigurationDeprecated) GoString() string {
14798 return s.String()
14799}
14800
14801// SetCloudFunctionConfiguration sets the CloudFunctionConfiguration field's value.
14802func (s *NotificationConfigurationDeprecated) SetCloudFunctionConfiguration(v *CloudFunctionConfiguration) *NotificationConfigurationDeprecated {
14803 s.CloudFunctionConfiguration = v
14804 return s
14805}
14806
14807// SetQueueConfiguration sets the QueueConfiguration field's value.
14808func (s *NotificationConfigurationDeprecated) SetQueueConfiguration(v *QueueConfigurationDeprecated) *NotificationConfigurationDeprecated {
14809 s.QueueConfiguration = v
14810 return s
14811}
14812
14813// SetTopicConfiguration sets the TopicConfiguration field's value.
14814func (s *NotificationConfigurationDeprecated) SetTopicConfiguration(v *TopicConfigurationDeprecated) *NotificationConfigurationDeprecated {
14815 s.TopicConfiguration = v
14816 return s
14817}
14818
14819// Container for object key name filtering rules. For information about key
14820// name filtering, go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
14821// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/NotificationConfigurationFilter
14822type NotificationConfigurationFilter struct {
14823 _ struct{} `type:"structure"`
14824
14825 // Container for object key name prefix and suffix filtering rules.
14826 Key *KeyFilter `locationName:"S3Key" type:"structure"`
14827}
14828
14829// String returns the string representation
14830func (s NotificationConfigurationFilter) String() string {
14831 return awsutil.Prettify(s)
14832}
14833
14834// GoString returns the string representation
14835func (s NotificationConfigurationFilter) GoString() string {
14836 return s.String()
14837}
14838
14839// SetKey sets the Key field's value.
14840func (s *NotificationConfigurationFilter) SetKey(v *KeyFilter) *NotificationConfigurationFilter {
14841 s.Key = v
14842 return s
14843}
14844
14845// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Object
14846type Object struct {
14847 _ struct{} `type:"structure"`
14848
14849 ETag *string `type:"string"`
14850
14851 Key *string `min:"1" type:"string"`
14852
14853 LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"`
14854
14855 Owner *Owner `type:"structure"`
14856
14857 Size *int64 `type:"integer"`
14858
14859 // The class of storage used to store the object.
14860 StorageClass *string `type:"string" enum:"ObjectStorageClass"`
14861}
14862
14863// String returns the string representation
14864func (s Object) String() string {
14865 return awsutil.Prettify(s)
14866}
14867
14868// GoString returns the string representation
14869func (s Object) GoString() string {
14870 return s.String()
14871}
14872
14873// SetETag sets the ETag field's value.
14874func (s *Object) SetETag(v string) *Object {
14875 s.ETag = &v
14876 return s
14877}
14878
14879// SetKey sets the Key field's value.
14880func (s *Object) SetKey(v string) *Object {
14881 s.Key = &v
14882 return s
14883}
14884
14885// SetLastModified sets the LastModified field's value.
14886func (s *Object) SetLastModified(v time.Time) *Object {
14887 s.LastModified = &v
14888 return s
14889}
14890
14891// SetOwner sets the Owner field's value.
14892func (s *Object) SetOwner(v *Owner) *Object {
14893 s.Owner = v
14894 return s
14895}
14896
14897// SetSize sets the Size field's value.
14898func (s *Object) SetSize(v int64) *Object {
14899 s.Size = &v
14900 return s
14901}
14902
14903// SetStorageClass sets the StorageClass field's value.
14904func (s *Object) SetStorageClass(v string) *Object {
14905 s.StorageClass = &v
14906 return s
14907}
14908
14909// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ObjectIdentifier
14910type ObjectIdentifier struct {
14911 _ struct{} `type:"structure"`
14912
14913 // Key name of the object to delete.
14914 //
14915 // Key is a required field
14916 Key *string `min:"1" type:"string" required:"true"`
14917
14918 // VersionId for the specific version of the object to delete.
14919 VersionId *string `type:"string"`
14920}
14921
14922// String returns the string representation
14923func (s ObjectIdentifier) String() string {
14924 return awsutil.Prettify(s)
14925}
14926
14927// GoString returns the string representation
14928func (s ObjectIdentifier) GoString() string {
14929 return s.String()
14930}
14931
14932// Validate inspects the fields of the type to determine if they are valid.
14933func (s *ObjectIdentifier) Validate() error {
14934 invalidParams := request.ErrInvalidParams{Context: "ObjectIdentifier"}
14935 if s.Key == nil {
14936 invalidParams.Add(request.NewErrParamRequired("Key"))
14937 }
14938 if s.Key != nil && len(*s.Key) < 1 {
14939 invalidParams.Add(request.NewErrParamMinLen("Key", 1))
14940 }
14941
14942 if invalidParams.Len() > 0 {
14943 return invalidParams
14944 }
14945 return nil
14946}
14947
14948// SetKey sets the Key field's value.
14949func (s *ObjectIdentifier) SetKey(v string) *ObjectIdentifier {
14950 s.Key = &v
14951 return s
14952}
14953
14954// SetVersionId sets the VersionId field's value.
14955func (s *ObjectIdentifier) SetVersionId(v string) *ObjectIdentifier {
14956 s.VersionId = &v
14957 return s
14958}
14959
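// exampleDeleteObjectsSketch is an illustrative sketch, not part of the
// generated API: it turns plain key strings into the ObjectIdentifier list
// that DeleteObjects requires. The bucket name is hypothetical; leaving
// VersionId unset targets the current version of each key.
func exampleDeleteObjectsSketch(svc *S3, keys []string) (*DeleteObjectsOutput, error) {
	ids := make([]*ObjectIdentifier, 0, len(keys))
	for _, k := range keys {
		ids = append(ids, (&ObjectIdentifier{}).SetKey(k))
	}
	return svc.DeleteObjects(&DeleteObjectsInput{
		Bucket: aws.String("example-bucket"), // hypothetical bucket
		Delete: &Delete{Objects: ids},
	})
}
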
14960// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ObjectVersion
14961type ObjectVersion struct {
14962 _ struct{} `type:"structure"`
14963
14964 ETag *string `type:"string"`
14965
14966	// Specifies whether the object is (true) or is not (false) the latest version
14967	// of the object.
14968 IsLatest *bool `type:"boolean"`
14969
14970 // The object key.
14971 Key *string `min:"1" type:"string"`
14972
14973 // Date and time the object was last modified.
14974 LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"`
14975
14976 Owner *Owner `type:"structure"`
14977
14978 // Size in bytes of the object.
14979 Size *int64 `type:"integer"`
14980
14981 // The class of storage used to store the object.
14982 StorageClass *string `type:"string" enum:"ObjectVersionStorageClass"`
14983
14984 // Version ID of an object.
14985 VersionId *string `type:"string"`
14986}
14987
14988// String returns the string representation
14989func (s ObjectVersion) String() string {
14990 return awsutil.Prettify(s)
14991}
14992
14993// GoString returns the string representation
14994func (s ObjectVersion) GoString() string {
14995 return s.String()
14996}
14997
14998// SetETag sets the ETag field's value.
14999func (s *ObjectVersion) SetETag(v string) *ObjectVersion {
15000 s.ETag = &v
15001 return s
15002}
15003
15004// SetIsLatest sets the IsLatest field's value.
15005func (s *ObjectVersion) SetIsLatest(v bool) *ObjectVersion {
15006 s.IsLatest = &v
15007 return s
15008}
15009
15010// SetKey sets the Key field's value.
15011func (s *ObjectVersion) SetKey(v string) *ObjectVersion {
15012 s.Key = &v
15013 return s
15014}
15015
15016// SetLastModified sets the LastModified field's value.
15017func (s *ObjectVersion) SetLastModified(v time.Time) *ObjectVersion {
15018 s.LastModified = &v
15019 return s
15020}
15021
15022// SetOwner sets the Owner field's value.
15023func (s *ObjectVersion) SetOwner(v *Owner) *ObjectVersion {
15024 s.Owner = v
15025 return s
15026}
15027
15028// SetSize sets the Size field's value.
15029func (s *ObjectVersion) SetSize(v int64) *ObjectVersion {
15030 s.Size = &v
15031 return s
15032}
15033
15034// SetStorageClass sets the StorageClass field's value.
15035func (s *ObjectVersion) SetStorageClass(v string) *ObjectVersion {
15036 s.StorageClass = &v
15037 return s
15038}
15039
15040// SetVersionId sets the VersionId field's value.
15041func (s *ObjectVersion) SetVersionId(v string) *ObjectVersion {
15042 s.VersionId = &v
15043 return s
15044}
15045
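// exampleNoncurrentVersionIDsSketch is an illustrative sketch, not part of
// the generated API: it filters a page of ObjectVersion elements down to the
// noncurrent ones, pairing each Key with its VersionId so a later versioned
// DeleteObjects call (see exampleDeleteObjectsSketch above) can target them.
func exampleNoncurrentVersionIDsSketch(versions []*ObjectVersion) []*ObjectIdentifier {
	var ids []*ObjectIdentifier
	for _, v := range versions {
		if v.IsLatest != nil && *v.IsLatest {
			continue // keep the current version
		}
		ids = append(ids, &ObjectIdentifier{Key: v.Key, VersionId: v.VersionId})
	}
	return ids
}
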
15046// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Owner
15047type Owner struct {
15048 _ struct{} `type:"structure"`
15049
15050 DisplayName *string `type:"string"`
15051
15052 ID *string `type:"string"`
15053}
15054
15055// String returns the string representation
15056func (s Owner) String() string {
15057 return awsutil.Prettify(s)
15058}
15059
15060// GoString returns the string representation
15061func (s Owner) GoString() string {
15062 return s.String()
15063}
15064
15065// SetDisplayName sets the DisplayName field's value.
15066func (s *Owner) SetDisplayName(v string) *Owner {
15067 s.DisplayName = &v
15068 return s
15069}
15070
15071// SetID sets the ID field's value.
15072func (s *Owner) SetID(v string) *Owner {
15073 s.ID = &v
15074 return s
15075}
15076
15077// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Part
15078type Part struct {
15079 _ struct{} `type:"structure"`
15080
15081 // Entity tag returned when the part was uploaded.
15082 ETag *string `type:"string"`
15083
15084 // Date and time at which the part was uploaded.
15085 LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"`
15086
15087 // Part number identifying the part. This is a positive integer between 1 and
15088 // 10,000.
15089 PartNumber *int64 `type:"integer"`
15090
15091 // Size of the uploaded part data.
15092 Size *int64 `type:"integer"`
15093}
15094
15095// String returns the string representation
15096func (s Part) String() string {
15097 return awsutil.Prettify(s)
15098}
15099
15100// GoString returns the string representation
15101func (s Part) GoString() string {
15102 return s.String()
15103}
15104
15105// SetETag sets the ETag field's value.
15106func (s *Part) SetETag(v string) *Part {
15107 s.ETag = &v
15108 return s
15109}
15110
15111// SetLastModified sets the LastModified field's value.
15112func (s *Part) SetLastModified(v time.Time) *Part {
15113 s.LastModified = &v
15114 return s
15115}
15116
15117// SetPartNumber sets the PartNumber field's value.
15118func (s *Part) SetPartNumber(v int64) *Part {
15119 s.PartNumber = &v
15120 return s
15121}
15122
15123// SetSize sets the Size field's value.
15124func (s *Part) SetSize(v int64) *Part {
15125 s.Size = &v
15126 return s
15127}
15128
15129// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAccelerateConfigurationRequest
15130type PutBucketAccelerateConfigurationInput struct {
15131 _ struct{} `type:"structure" payload:"AccelerateConfiguration"`
15132
15133 // Specifies the Accelerate Configuration you want to set for the bucket.
15134 //
15135 // AccelerateConfiguration is a required field
15136 AccelerateConfiguration *AccelerateConfiguration `locationName:"AccelerateConfiguration" type:"structure" required:"true"`
15137
15138 // Name of the bucket for which the accelerate configuration is set.
15139 //
15140 // Bucket is a required field
15141 Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
15142}
15143
15144// String returns the string representation
15145func (s PutBucketAccelerateConfigurationInput) String() string {
15146 return awsutil.Prettify(s)
15147}
15148
15149// GoString returns the string representation
15150func (s PutBucketAccelerateConfigurationInput) GoString() string {
15151 return s.String()
15152}
15153
15154// Validate inspects the fields of the type to determine if they are valid.
15155func (s *PutBucketAccelerateConfigurationInput) Validate() error {
15156 invalidParams := request.ErrInvalidParams{Context: "PutBucketAccelerateConfigurationInput"}
15157 if s.AccelerateConfiguration == nil {
15158 invalidParams.Add(request.NewErrParamRequired("AccelerateConfiguration"))
15159 }
15160 if s.Bucket == nil {
15161 invalidParams.Add(request.NewErrParamRequired("Bucket"))
15162 }
15163
15164 if invalidParams.Len() > 0 {
15165 return invalidParams
15166 }
15167 return nil
15168}
15169
15170// SetAccelerateConfiguration sets the AccelerateConfiguration field's value.
15171func (s *PutBucketAccelerateConfigurationInput) SetAccelerateConfiguration(v *AccelerateConfiguration) *PutBucketAccelerateConfigurationInput {
15172 s.AccelerateConfiguration = v
15173 return s
15174}
15175
15176// SetBucket sets the Bucket field's value.
15177func (s *PutBucketAccelerateConfigurationInput) SetBucket(v string) *PutBucketAccelerateConfigurationInput {
15178 s.Bucket = &v
15179 return s
15180}
15181
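// examplePutAccelerateSketch is an illustrative sketch, not part of the
// generated API: it enables transfer acceleration on a hypothetical bucket.
// "Enabled" and "Suspended" are the BucketAccelerateStatus values the
// AccelerateConfiguration accepts.
func examplePutAccelerateSketch(svc *S3) error {
	_, err := svc.PutBucketAccelerateConfiguration(&PutBucketAccelerateConfigurationInput{
		Bucket: aws.String("example-bucket"), // hypothetical bucket
		AccelerateConfiguration: (&AccelerateConfiguration{}).
			SetStatus("Enabled"),
	})
	return err
}
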
15182// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAccelerateConfigurationOutput
15183type PutBucketAccelerateConfigurationOutput struct {
15184 _ struct{} `type:"structure"`
15185}
15186
15187// String returns the string representation
15188func (s PutBucketAccelerateConfigurationOutput) String() string {
15189 return awsutil.Prettify(s)
15190}
15191
15192// GoString returns the string representation
15193func (s PutBucketAccelerateConfigurationOutput) GoString() string {
15194 return s.String()
15195}
15196
15197// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAclRequest
15198type PutBucketAclInput struct {
15199 _ struct{} `type:"structure" payload:"AccessControlPolicy"`
15200
15201 // The canned ACL to apply to the bucket.
15202 ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"BucketCannedACL"`
15203
15204 AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure"`
15205
15206 // Bucket is a required field
15207 Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
15208
15209 // Allows grantee the read, write, read ACP, and write ACP permissions on the
15210 // bucket.
15211 GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`
15212
15213 // Allows grantee to list the objects in the bucket.
15214 GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"`
15215
15216 // Allows grantee to read the bucket ACL.
15217 GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"`
15218
15219 // Allows grantee to create, overwrite, and delete any object in the bucket.
15220 GrantWrite *string `location:"header" locationName:"x-amz-grant-write" type:"string"`
15221
15222 // Allows grantee to write the ACL for the applicable bucket.
15223 GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
15224}
15225
15226// String returns the string representation
15227func (s PutBucketAclInput) String() string {
15228 return awsutil.Prettify(s)
15229}
15230
15231// GoString returns the string representation
15232func (s PutBucketAclInput) GoString() string {
15233 return s.String()
15234}
15235
15236// Validate inspects the fields of the type to determine if they are valid.
15237func (s *PutBucketAclInput) Validate() error {
15238 invalidParams := request.ErrInvalidParams{Context: "PutBucketAclInput"}
15239 if s.Bucket == nil {
15240 invalidParams.Add(request.NewErrParamRequired("Bucket"))
15241 }
15242 if s.AccessControlPolicy != nil {
15243 if err := s.AccessControlPolicy.Validate(); err != nil {
15244 invalidParams.AddNested("AccessControlPolicy", err.(request.ErrInvalidParams))
15245 }
15246 }
15247
15248 if invalidParams.Len() > 0 {
15249 return invalidParams
15250 }
15251 return nil
15252}
15253
15254// SetACL sets the ACL field's value.
15255func (s *PutBucketAclInput) SetACL(v string) *PutBucketAclInput {
15256 s.ACL = &v
15257 return s
15258}
15259
15260// SetAccessControlPolicy sets the AccessControlPolicy field's value.
15261func (s *PutBucketAclInput) SetAccessControlPolicy(v *AccessControlPolicy) *PutBucketAclInput {
15262 s.AccessControlPolicy = v
15263 return s
15264}
15265
15266// SetBucket sets the Bucket field's value.
15267func (s *PutBucketAclInput) SetBucket(v string) *PutBucketAclInput {
15268 s.Bucket = &v
15269 return s
15270}
15271
15272// SetGrantFullControl sets the GrantFullControl field's value.
15273func (s *PutBucketAclInput) SetGrantFullControl(v string) *PutBucketAclInput {
15274 s.GrantFullControl = &v
15275 return s
15276}
15277
15278// SetGrantRead sets the GrantRead field's value.
15279func (s *PutBucketAclInput) SetGrantRead(v string) *PutBucketAclInput {
15280 s.GrantRead = &v
15281 return s
15282}
15283
15284// SetGrantReadACP sets the GrantReadACP field's value.
15285func (s *PutBucketAclInput) SetGrantReadACP(v string) *PutBucketAclInput {
15286 s.GrantReadACP = &v
15287 return s
15288}
15289
15290// SetGrantWrite sets the GrantWrite field's value.
15291func (s *PutBucketAclInput) SetGrantWrite(v string) *PutBucketAclInput {
15292 s.GrantWrite = &v
15293 return s
15294}
15295
15296// SetGrantWriteACP sets the GrantWriteACP field's value.
15297func (s *PutBucketAclInput) SetGrantWriteACP(v string) *PutBucketAclInput {
15298 s.GrantWriteACP = &v
15299 return s
15300}
15301
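// examplePutBucketAclSketch is an illustrative sketch, not part of the
// generated API: it applies a canned ACL, the simplest of the three grant
// mechanisms this input supports (the canned-ACL header, the explicit
// x-amz-grant-* headers, or an AccessControlPolicy body). The bucket name is
// hypothetical.
func examplePutBucketAclSketch(svc *S3) error {
	_, err := svc.PutBucketAcl(&PutBucketAclInput{
		Bucket: aws.String("example-bucket"), // hypothetical bucket
		ACL:    aws.String("private"),        // a BucketCannedACL value
	})
	return err
}
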
15302// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAclOutput
15303type PutBucketAclOutput struct {
15304 _ struct{} `type:"structure"`
15305}
15306
15307// String returns the string representation
15308func (s PutBucketAclOutput) String() string {
15309 return awsutil.Prettify(s)
15310}
15311
15312// GoString returns the string representation
15313func (s PutBucketAclOutput) GoString() string {
15314 return s.String()
15315}
15316
15317// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAnalyticsConfigurationRequest
15318type PutBucketAnalyticsConfigurationInput struct {
15319 _ struct{} `type:"structure" payload:"AnalyticsConfiguration"`
15320
15321 // The configuration and any analyses for the analytics filter.
15322 //
15323 // AnalyticsConfiguration is a required field
15324 AnalyticsConfiguration *AnalyticsConfiguration `locationName:"AnalyticsConfiguration" type:"structure" required:"true"`
15325
15326	// The name of the bucket where the analytics configuration is stored.
15327 //
15328 // Bucket is a required field
15329 Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
15330
15331 // The identifier used to represent an analytics configuration.
15332 //
15333 // Id is a required field
15334 Id *string `location:"querystring" locationName:"id" type:"string" required:"true"`
15335}
15336
15337// String returns the string representation
15338func (s PutBucketAnalyticsConfigurationInput) String() string {
15339 return awsutil.Prettify(s)
15340}
15341
15342// GoString returns the string representation
15343func (s PutBucketAnalyticsConfigurationInput) GoString() string {
15344 return s.String()
15345}
15346
15347// Validate inspects the fields of the type to determine if they are valid.
15348func (s *PutBucketAnalyticsConfigurationInput) Validate() error {
15349 invalidParams := request.ErrInvalidParams{Context: "PutBucketAnalyticsConfigurationInput"}
15350 if s.AnalyticsConfiguration == nil {
15351 invalidParams.Add(request.NewErrParamRequired("AnalyticsConfiguration"))
15352 }
15353 if s.Bucket == nil {
15354 invalidParams.Add(request.NewErrParamRequired("Bucket"))
15355 }
15356 if s.Id == nil {
15357 invalidParams.Add(request.NewErrParamRequired("Id"))
15358 }
15359 if s.AnalyticsConfiguration != nil {
15360 if err := s.AnalyticsConfiguration.Validate(); err != nil {
15361 invalidParams.AddNested("AnalyticsConfiguration", err.(request.ErrInvalidParams))
15362 }
15363 }
15364
15365 if invalidParams.Len() > 0 {
15366 return invalidParams
15367 }
15368 return nil
15369}
15370
15371// SetAnalyticsConfiguration sets the AnalyticsConfiguration field's value.
15372func (s *PutBucketAnalyticsConfigurationInput) SetAnalyticsConfiguration(v *AnalyticsConfiguration) *PutBucketAnalyticsConfigurationInput {
15373 s.AnalyticsConfiguration = v
15374 return s
15375}
15376
15377// SetBucket sets the Bucket field's value.
15378func (s *PutBucketAnalyticsConfigurationInput) SetBucket(v string) *PutBucketAnalyticsConfigurationInput {
15379 s.Bucket = &v
15380 return s
15381}
15382
15383// SetId sets the Id field's value.
15384func (s *PutBucketAnalyticsConfigurationInput) SetId(v string) *PutBucketAnalyticsConfigurationInput {
15385 s.Id = &v
15386 return s
15387}
15388
15389// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAnalyticsConfigurationOutput
15390type PutBucketAnalyticsConfigurationOutput struct {
15391 _ struct{} `type:"structure"`
15392}
15393
15394// String returns the string representation
15395func (s PutBucketAnalyticsConfigurationOutput) String() string {
15396 return awsutil.Prettify(s)
15397}
15398
15399// GoString returns the string representation
15400func (s PutBucketAnalyticsConfigurationOutput) GoString() string {
15401 return s.String()
15402}
15403
15404// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketCorsRequest
15405type PutBucketCorsInput struct {
15406 _ struct{} `type:"structure" payload:"CORSConfiguration"`
15407
15408 // Bucket is a required field
15409 Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
15410
15411 // CORSConfiguration is a required field
15412 CORSConfiguration *CORSConfiguration `locationName:"CORSConfiguration" type:"structure" required:"true"`
15413}
15414
15415// String returns the string representation
15416func (s PutBucketCorsInput) String() string {
15417 return awsutil.Prettify(s)
15418}
15419
15420// GoString returns the string representation
15421func (s PutBucketCorsInput) GoString() string {
15422 return s.String()
15423}
15424
15425// Validate inspects the fields of the type to determine if they are valid.
15426func (s *PutBucketCorsInput) Validate() error {
15427 invalidParams := request.ErrInvalidParams{Context: "PutBucketCorsInput"}
15428 if s.Bucket == nil {
15429 invalidParams.Add(request.NewErrParamRequired("Bucket"))
15430 }
15431 if s.CORSConfiguration == nil {
15432 invalidParams.Add(request.NewErrParamRequired("CORSConfiguration"))
15433 }
15434 if s.CORSConfiguration != nil {
15435 if err := s.CORSConfiguration.Validate(); err != nil {
15436 invalidParams.AddNested("CORSConfiguration", err.(request.ErrInvalidParams))
15437 }
15438 }
15439
15440 if invalidParams.Len() > 0 {
15441 return invalidParams
15442 }
15443 return nil
15444}
15445
15446// SetBucket sets the Bucket field's value.
15447func (s *PutBucketCorsInput) SetBucket(v string) *PutBucketCorsInput {
15448 s.Bucket = &v
15449 return s
15450}
15451
15452// SetCORSConfiguration sets the CORSConfiguration field's value.
15453func (s *PutBucketCorsInput) SetCORSConfiguration(v *CORSConfiguration) *PutBucketCorsInput {
15454 s.CORSConfiguration = v
15455 return s
15456}
15457
15458// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketCorsOutput
15459type PutBucketCorsOutput struct {
15460 _ struct{} `type:"structure"`
15461}
15462
15463// String returns the string representation
15464func (s PutBucketCorsOutput) String() string {
15465 return awsutil.Prettify(s)
15466}
15467
15468// GoString returns the string representation
15469func (s PutBucketCorsOutput) GoString() string {
15470 return s.String()
15471}
15472
15473// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketInventoryConfigurationRequest
15474type PutBucketInventoryConfigurationInput struct {
15475 _ struct{} `type:"structure" payload:"InventoryConfiguration"`
15476
15477 // The name of the bucket where the inventory configuration will be stored.
15478 //
15479 // Bucket is a required field
15480 Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
15481
15482 // The ID used to identify the inventory configuration.
15483 //
15484 // Id is a required field
15485 Id *string `location:"querystring" locationName:"id" type:"string" required:"true"`
15486
15487 // Specifies the inventory configuration.
15488 //
15489 // InventoryConfiguration is a required field
15490 InventoryConfiguration *InventoryConfiguration `locationName:"InventoryConfiguration" type:"structure" required:"true"`
15491}
15492
15493// String returns the string representation
15494func (s PutBucketInventoryConfigurationInput) String() string {
15495 return awsutil.Prettify(s)
15496}
15497
15498// GoString returns the string representation
15499func (s PutBucketInventoryConfigurationInput) GoString() string {
15500 return s.String()
15501}
15502
15503// Validate inspects the fields of the type to determine if they are valid.
15504func (s *PutBucketInventoryConfigurationInput) Validate() error {
15505 invalidParams := request.ErrInvalidParams{Context: "PutBucketInventoryConfigurationInput"}
15506 if s.Bucket == nil {
15507 invalidParams.Add(request.NewErrParamRequired("Bucket"))
15508 }
15509 if s.Id == nil {
15510 invalidParams.Add(request.NewErrParamRequired("Id"))
15511 }
15512 if s.InventoryConfiguration == nil {
15513 invalidParams.Add(request.NewErrParamRequired("InventoryConfiguration"))
15514 }
15515 if s.InventoryConfiguration != nil {
15516 if err := s.InventoryConfiguration.Validate(); err != nil {
15517 invalidParams.AddNested("InventoryConfiguration", err.(request.ErrInvalidParams))
15518 }
15519 }
15520
15521 if invalidParams.Len() > 0 {
15522 return invalidParams
15523 }
15524 return nil
15525}
15526
15527// SetBucket sets the Bucket field's value.
15528func (s *PutBucketInventoryConfigurationInput) SetBucket(v string) *PutBucketInventoryConfigurationInput {
15529 s.Bucket = &v
15530 return s
15531}
15532
15533// SetId sets the Id field's value.
15534func (s *PutBucketInventoryConfigurationInput) SetId(v string) *PutBucketInventoryConfigurationInput {
15535 s.Id = &v
15536 return s
15537}
15538
15539// SetInventoryConfiguration sets the InventoryConfiguration field's value.
15540func (s *PutBucketInventoryConfigurationInput) SetInventoryConfiguration(v *InventoryConfiguration) *PutBucketInventoryConfigurationInput {
15541 s.InventoryConfiguration = v
15542 return s
15543}
15544
15545// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketInventoryConfigurationOutput
15546type PutBucketInventoryConfigurationOutput struct {
15547 _ struct{} `type:"structure"`
15548}
15549
15550// String returns the string representation
15551func (s PutBucketInventoryConfigurationOutput) String() string {
15552 return awsutil.Prettify(s)
15553}
15554
15555// GoString returns the string representation
15556func (s PutBucketInventoryConfigurationOutput) GoString() string {
15557 return s.String()
15558}
15559
15560// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfigurationRequest
15561type PutBucketLifecycleConfigurationInput struct {
15562 _ struct{} `type:"structure" payload:"LifecycleConfiguration"`
15563
15564 // Bucket is a required field
15565 Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
15566
15567 LifecycleConfiguration *BucketLifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure"`
15568}
15569
15570// String returns the string representation
15571func (s PutBucketLifecycleConfigurationInput) String() string {
15572 return awsutil.Prettify(s)
15573}
15574
15575// GoString returns the string representation
15576func (s PutBucketLifecycleConfigurationInput) GoString() string {
15577 return s.String()
15578}
15579
15580// Validate inspects the fields of the type to determine if they are valid.
15581func (s *PutBucketLifecycleConfigurationInput) Validate() error {
15582 invalidParams := request.ErrInvalidParams{Context: "PutBucketLifecycleConfigurationInput"}
15583 if s.Bucket == nil {
15584 invalidParams.Add(request.NewErrParamRequired("Bucket"))
15585 }
15586 if s.LifecycleConfiguration != nil {
15587 if err := s.LifecycleConfiguration.Validate(); err != nil {
15588 invalidParams.AddNested("LifecycleConfiguration", err.(request.ErrInvalidParams))
15589 }
15590 }
15591
15592 if invalidParams.Len() > 0 {
15593 return invalidParams
15594 }
15595 return nil
15596}
15597
15598// SetBucket sets the Bucket field's value.
15599func (s *PutBucketLifecycleConfigurationInput) SetBucket(v string) *PutBucketLifecycleConfigurationInput {
15600 s.Bucket = &v
15601 return s
15602}
15603
15604// SetLifecycleConfiguration sets the LifecycleConfiguration field's value.
15605func (s *PutBucketLifecycleConfigurationInput) SetLifecycleConfiguration(v *BucketLifecycleConfiguration) *PutBucketLifecycleConfigurationInput {
15606 s.LifecycleConfiguration = v
15607 return s
15608}
15609
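// examplePutLifecycleConfigurationSketch is an illustrative sketch, not part
// of the generated API: it submits the noncurrent-version rule built in
// exampleNoncurrentLifecycleSketch above. The bucket name is hypothetical.
func examplePutLifecycleConfigurationSketch(svc *S3) error {
	_, err := svc.PutBucketLifecycleConfiguration(&PutBucketLifecycleConfigurationInput{
		Bucket: aws.String("example-bucket"), // hypothetical bucket
		LifecycleConfiguration: &BucketLifecycleConfiguration{
			Rules: []*LifecycleRule{exampleNoncurrentLifecycleSketch()},
		},
	})
	return err
}
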
15610// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfigurationOutput
15611type PutBucketLifecycleConfigurationOutput struct {
15612 _ struct{} `type:"structure"`
15613}
15614
15615// String returns the string representation
15616func (s PutBucketLifecycleConfigurationOutput) String() string {
15617 return awsutil.Prettify(s)
15618}
15619
15620// GoString returns the string representation
15621func (s PutBucketLifecycleConfigurationOutput) GoString() string {
15622 return s.String()
15623}
15624
15625// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleRequest
15626type PutBucketLifecycleInput struct {
15627 _ struct{} `type:"structure" payload:"LifecycleConfiguration"`
15628
15629 // Bucket is a required field
15630 Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
15631
15632 LifecycleConfiguration *LifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure"`
15633}
15634
15635// String returns the string representation
15636func (s PutBucketLifecycleInput) String() string {
15637 return awsutil.Prettify(s)
15638}
15639
15640// GoString returns the string representation
15641func (s PutBucketLifecycleInput) GoString() string {
15642 return s.String()
15643}
15644
15645// Validate inspects the fields of the type to determine if they are valid.
15646func (s *PutBucketLifecycleInput) Validate() error {
15647 invalidParams := request.ErrInvalidParams{Context: "PutBucketLifecycleInput"}
15648 if s.Bucket == nil {
15649 invalidParams.Add(request.NewErrParamRequired("Bucket"))
15650 }
15651 if s.LifecycleConfiguration != nil {
15652 if err := s.LifecycleConfiguration.Validate(); err != nil {
15653 invalidParams.AddNested("LifecycleConfiguration", err.(request.ErrInvalidParams))
15654 }
15655 }
15656
15657 if invalidParams.Len() > 0 {
15658 return invalidParams
15659 }
15660 return nil
15661}
15662
15663// SetBucket sets the Bucket field's value.
15664func (s *PutBucketLifecycleInput) SetBucket(v string) *PutBucketLifecycleInput {
15665 s.Bucket = &v
15666 return s
15667}
15668
15669// SetLifecycleConfiguration sets the LifecycleConfiguration field's value.
15670func (s *PutBucketLifecycleInput) SetLifecycleConfiguration(v *LifecycleConfiguration) *PutBucketLifecycleInput {
15671 s.LifecycleConfiguration = v
15672 return s
15673}
15674
15675// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleOutput
15676type PutBucketLifecycleOutput struct {
15677 _ struct{} `type:"structure"`
15678}
15679
15680// String returns the string representation
15681func (s PutBucketLifecycleOutput) String() string {
15682 return awsutil.Prettify(s)
15683}
15684
15685// GoString returns the string representation
15686func (s PutBucketLifecycleOutput) GoString() string {
15687 return s.String()
15688}
15689
15690// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLoggingRequest
15691type PutBucketLoggingInput struct {
15692 _ struct{} `type:"structure" payload:"BucketLoggingStatus"`
15693
15694 // Bucket is a required field
15695 Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
15696
15697 // BucketLoggingStatus is a required field
15698 BucketLoggingStatus *BucketLoggingStatus `locationName:"BucketLoggingStatus" type:"structure" required:"true"`
15699}
15700
15701// String returns the string representation
15702func (s PutBucketLoggingInput) String() string {
15703 return awsutil.Prettify(s)
15704}
15705
15706// GoString returns the string representation
15707func (s PutBucketLoggingInput) GoString() string {
15708 return s.String()
15709}
15710
15711// Validate inspects the fields of the type to determine if they are valid.
15712func (s *PutBucketLoggingInput) Validate() error {
15713 invalidParams := request.ErrInvalidParams{Context: "PutBucketLoggingInput"}
15714 if s.Bucket == nil {
15715 invalidParams.Add(request.NewErrParamRequired("Bucket"))
15716 }
15717 if s.BucketLoggingStatus == nil {
15718 invalidParams.Add(request.NewErrParamRequired("BucketLoggingStatus"))
15719 }
15720 if s.BucketLoggingStatus != nil {
15721 if err := s.BucketLoggingStatus.Validate(); err != nil {
15722 invalidParams.AddNested("BucketLoggingStatus", err.(request.ErrInvalidParams))
15723 }
15724 }
15725
15726 if invalidParams.Len() > 0 {
15727 return invalidParams
15728 }
15729 return nil
15730}
15731
15732// SetBucket sets the Bucket field's value.
15733func (s *PutBucketLoggingInput) SetBucket(v string) *PutBucketLoggingInput {
15734 s.Bucket = &v
15735 return s
15736}
15737
15738// SetBucketLoggingStatus sets the BucketLoggingStatus field's value.
15739func (s *PutBucketLoggingInput) SetBucketLoggingStatus(v *BucketLoggingStatus) *PutBucketLoggingInput {
15740 s.BucketLoggingStatus = v
15741 return s
15742}
15743
15744// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLoggingOutput
15745type PutBucketLoggingOutput struct {
15746 _ struct{} `type:"structure"`
15747}
15748
15749// String returns the string representation
15750func (s PutBucketLoggingOutput) String() string {
15751 return awsutil.Prettify(s)
15752}
15753
15754// GoString returns the string representation
15755func (s PutBucketLoggingOutput) GoString() string {
15756 return s.String()
15757}
15758
15759// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketMetricsConfigurationRequest
15760type PutBucketMetricsConfigurationInput struct {
15761 _ struct{} `type:"structure" payload:"MetricsConfiguration"`
15762
15763 // The name of the bucket for which the metrics configuration is set.
15764 //
15765 // Bucket is a required field
15766 Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
15767
15768 // The ID used to identify the metrics configuration.
15769 //
15770 // Id is a required field
15771 Id *string `location:"querystring" locationName:"id" type:"string" required:"true"`
15772
15773 // Specifies the metrics configuration.
15774 //
15775 // MetricsConfiguration is a required field
15776 MetricsConfiguration *MetricsConfiguration `locationName:"MetricsConfiguration" type:"structure" required:"true"`
15777}
15778
15779// String returns the string representation
15780func (s PutBucketMetricsConfigurationInput) String() string {
15781 return awsutil.Prettify(s)
15782}
15783
15784// GoString returns the string representation
15785func (s PutBucketMetricsConfigurationInput) GoString() string {
15786 return s.String()
15787}
15788
15789// Validate inspects the fields of the type to determine if they are valid.
15790func (s *PutBucketMetricsConfigurationInput) Validate() error {
15791 invalidParams := request.ErrInvalidParams{Context: "PutBucketMetricsConfigurationInput"}
15792 if s.Bucket == nil {
15793 invalidParams.Add(request.NewErrParamRequired("Bucket"))
15794 }
15795 if s.Id == nil {
15796 invalidParams.Add(request.NewErrParamRequired("Id"))
15797 }
15798 if s.MetricsConfiguration == nil {
15799 invalidParams.Add(request.NewErrParamRequired("MetricsConfiguration"))
15800 }
15801 if s.MetricsConfiguration != nil {
15802 if err := s.MetricsConfiguration.Validate(); err != nil {
15803 invalidParams.AddNested("MetricsConfiguration", err.(request.ErrInvalidParams))
15804 }
15805 }
15806
15807 if invalidParams.Len() > 0 {
15808 return invalidParams
15809 }
15810 return nil
15811}
15812
15813// SetBucket sets the Bucket field's value.
15814func (s *PutBucketMetricsConfigurationInput) SetBucket(v string) *PutBucketMetricsConfigurationInput {
15815 s.Bucket = &v
15816 return s
15817}
15818
15819// SetId sets the Id field's value.
15820func (s *PutBucketMetricsConfigurationInput) SetId(v string) *PutBucketMetricsConfigurationInput {
15821 s.Id = &v
15822 return s
15823}
15824
15825// SetMetricsConfiguration sets the MetricsConfiguration field's value.
15826func (s *PutBucketMetricsConfigurationInput) SetMetricsConfiguration(v *MetricsConfiguration) *PutBucketMetricsConfigurationInput {
15827 s.MetricsConfiguration = v
15828 return s
15829}
15830
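// examplePutMetricsConfigurationSketch is an illustrative sketch, not part of
// the generated API: it stores the configuration built in
// exampleMetricsConfigurationSketch above. Note that the querystring Id here
// must match the Id inside the configuration itself. The bucket name is
// hypothetical.
func examplePutMetricsConfigurationSketch(svc *S3) error {
	cfg := exampleMetricsConfigurationSketch()
	_, err := svc.PutBucketMetricsConfiguration(&PutBucketMetricsConfigurationInput{
		Bucket:               aws.String("example-bucket"), // hypothetical bucket
		Id:                   cfg.Id,
		MetricsConfiguration: cfg,
	})
	return err
}
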
15831// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketMetricsConfigurationOutput
15832type PutBucketMetricsConfigurationOutput struct {
15833 _ struct{} `type:"structure"`
15834}
15835
15836// String returns the string representation
15837func (s PutBucketMetricsConfigurationOutput) String() string {
15838 return awsutil.Prettify(s)
15839}
15840
15841// GoString returns the string representation
15842func (s PutBucketMetricsConfigurationOutput) GoString() string {
15843 return s.String()
15844}
15845
15846// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationConfigurationRequest
15847type PutBucketNotificationConfigurationInput struct {
15848 _ struct{} `type:"structure" payload:"NotificationConfiguration"`
15849
15850 // Bucket is a required field
15851 Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
15852
15853 // Container for specifying the notification configuration of the bucket. If
15854 // this element is empty, notifications are turned off on the bucket.
15855 //
15856 // NotificationConfiguration is a required field
15857 NotificationConfiguration *NotificationConfiguration `locationName:"NotificationConfiguration" type:"structure" required:"true"`
15858}
15859
15860// String returns the string representation
15861func (s PutBucketNotificationConfigurationInput) String() string {
15862 return awsutil.Prettify(s)
15863}
15864
15865// GoString returns the string representation
15866func (s PutBucketNotificationConfigurationInput) GoString() string {
15867 return s.String()
15868}
15869
15870// Validate inspects the fields of the type to determine if they are valid.
15871func (s *PutBucketNotificationConfigurationInput) Validate() error {
15872 invalidParams := request.ErrInvalidParams{Context: "PutBucketNotificationConfigurationInput"}
15873 if s.Bucket == nil {
15874 invalidParams.Add(request.NewErrParamRequired("Bucket"))
15875 }
15876 if s.NotificationConfiguration == nil {
15877 invalidParams.Add(request.NewErrParamRequired("NotificationConfiguration"))
15878 }
15879 if s.NotificationConfiguration != nil {
15880 if err := s.NotificationConfiguration.Validate(); err != nil {
15881 invalidParams.AddNested("NotificationConfiguration", err.(request.ErrInvalidParams))
15882 }
15883 }
15884
15885 if invalidParams.Len() > 0 {
15886 return invalidParams
15887 }
15888 return nil
15889}
15890
15891// SetBucket sets the Bucket field's value.
15892func (s *PutBucketNotificationConfigurationInput) SetBucket(v string) *PutBucketNotificationConfigurationInput {
15893 s.Bucket = &v
15894 return s
15895}
15896
15897// SetNotificationConfiguration sets the NotificationConfiguration field's value.
15898func (s *PutBucketNotificationConfigurationInput) SetNotificationConfiguration(v *NotificationConfiguration) *PutBucketNotificationConfigurationInput {
15899 s.NotificationConfiguration = v
15900 return s
15901}
15902
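// Editorial sketch: publishing ObjectCreated events to an SQS queue via the
// input type above. The bucket name and queue ARN are hypothetical
// placeholders; aws.String is the usual pointer helper from the aws package.
//
//	qc := (&QueueConfiguration{}).
//		SetQueueArn("arn:aws:sqs:us-east-1:123456789012:example-queue").
//		SetEvents([]*string{aws.String("s3:ObjectCreated:*")})
//	input := (&PutBucketNotificationConfigurationInput{}).
//		SetBucket("example-bucket").
//		SetNotificationConfiguration((&NotificationConfiguration{}).
//			SetQueueConfigurations([]*QueueConfiguration{qc}))
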
15903// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationConfigurationOutput
15904type PutBucketNotificationConfigurationOutput struct {
15905 _ struct{} `type:"structure"`
15906}
15907
15908// String returns the string representation
15909func (s PutBucketNotificationConfigurationOutput) String() string {
15910 return awsutil.Prettify(s)
15911}
15912
15913// GoString returns the string representation
15914func (s PutBucketNotificationConfigurationOutput) GoString() string {
15915 return s.String()
15916}
15917
15918// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationRequest
15919type PutBucketNotificationInput struct {
15920 _ struct{} `type:"structure" payload:"NotificationConfiguration"`
15921
15922 // Bucket is a required field
15923 Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
15924
15925 // NotificationConfiguration is a required field
15926 NotificationConfiguration *NotificationConfigurationDeprecated `locationName:"NotificationConfiguration" type:"structure" required:"true"`
15927}
15928
15929// String returns the string representation
15930func (s PutBucketNotificationInput) String() string {
15931 return awsutil.Prettify(s)
15932}
15933
15934// GoString returns the string representation
15935func (s PutBucketNotificationInput) GoString() string {
15936 return s.String()
15937}
15938
15939// Validate inspects the fields of the type to determine if they are valid.
15940func (s *PutBucketNotificationInput) Validate() error {
15941 invalidParams := request.ErrInvalidParams{Context: "PutBucketNotificationInput"}
15942 if s.Bucket == nil {
15943 invalidParams.Add(request.NewErrParamRequired("Bucket"))
15944 }
15945 if s.NotificationConfiguration == nil {
15946 invalidParams.Add(request.NewErrParamRequired("NotificationConfiguration"))
15947 }
15948
15949 if invalidParams.Len() > 0 {
15950 return invalidParams
15951 }
15952 return nil
15953}
15954
15955// SetBucket sets the Bucket field's value.
15956func (s *PutBucketNotificationInput) SetBucket(v string) *PutBucketNotificationInput {
15957 s.Bucket = &v
15958 return s
15959}
15960
15961// SetNotificationConfiguration sets the NotificationConfiguration field's value.
15962func (s *PutBucketNotificationInput) SetNotificationConfiguration(v *NotificationConfigurationDeprecated) *PutBucketNotificationInput {
15963 s.NotificationConfiguration = v
15964 return s
15965}
15966
15967// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationOutput
15968type PutBucketNotificationOutput struct {
15969 _ struct{} `type:"structure"`
15970}
15971
15972// String returns the string representation
15973func (s PutBucketNotificationOutput) String() string {
15974 return awsutil.Prettify(s)
15975}
15976
15977// GoString returns the string representation
15978func (s PutBucketNotificationOutput) GoString() string {
15979 return s.String()
15980}
15981
15982// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketPolicyRequest
15983type PutBucketPolicyInput struct {
15984 _ struct{} `type:"structure" payload:"Policy"`
15985
15986 // Bucket is a required field
15987 Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
15988
15989 // The bucket policy as a JSON document.
15990 //
15991 // Policy is a required field
15992 Policy *string `type:"string" required:"true"`
15993}
15994
15995// String returns the string representation
15996func (s PutBucketPolicyInput) String() string {
15997 return awsutil.Prettify(s)
15998}
15999
16000// GoString returns the string representation
16001func (s PutBucketPolicyInput) GoString() string {
16002 return s.String()
16003}
16004
16005// Validate inspects the fields of the type to determine if they are valid.
16006func (s *PutBucketPolicyInput) Validate() error {
16007 invalidParams := request.ErrInvalidParams{Context: "PutBucketPolicyInput"}
16008 if s.Bucket == nil {
16009 invalidParams.Add(request.NewErrParamRequired("Bucket"))
16010 }
16011 if s.Policy == nil {
16012 invalidParams.Add(request.NewErrParamRequired("Policy"))
16013 }
16014
16015 if invalidParams.Len() > 0 {
16016 return invalidParams
16017 }
16018 return nil
16019}
16020
16021// SetBucket sets the Bucket field's value.
16022func (s *PutBucketPolicyInput) SetBucket(v string) *PutBucketPolicyInput {
16023 s.Bucket = &v
16024 return s
16025}
16026
16027// SetPolicy sets the Policy field's value.
16028func (s *PutBucketPolicyInput) SetPolicy(v string) *PutBucketPolicyInput {
16029 s.Policy = &v
16030 return s
16031}
16032
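// Editorial sketch: building a PutBucketPolicyInput with the fluent setters
// above. The bucket name and the public-read policy document are hypothetical
// placeholders.
//
//	policy := `{
//	  "Version": "2012-10-17",
//	  "Statement": [{
//	    "Effect": "Allow",
//	    "Principal": "*",
//	    "Action": "s3:GetObject",
//	    "Resource": "arn:aws:s3:::example-bucket/*"
//	  }]
//	}`
//	input := (&PutBucketPolicyInput{}).
//		SetBucket("example-bucket").
//		SetPolicy(policy)
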
16033// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketPolicyOutput
16034type PutBucketPolicyOutput struct {
16035 _ struct{} `type:"structure"`
16036}
16037
16038// String returns the string representation
16039func (s PutBucketPolicyOutput) String() string {
16040 return awsutil.Prettify(s)
16041}
16042
16043// GoString returns the string representation
16044func (s PutBucketPolicyOutput) GoString() string {
16045 return s.String()
16046}
16047
16048// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketReplicationRequest
16049type PutBucketReplicationInput struct {
16050 _ struct{} `type:"structure" payload:"ReplicationConfiguration"`
16051
16052 // Bucket is a required field
16053 Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
16054
16055 // Container for replication rules. You can add as many as 1,000 rules. Total
16056 // replication configuration size can be up to 2 MB.
16057 //
16058 // ReplicationConfiguration is a required field
16059 ReplicationConfiguration *ReplicationConfiguration `locationName:"ReplicationConfiguration" type:"structure" required:"true"`
16060}
16061
16062// String returns the string representation
16063func (s PutBucketReplicationInput) String() string {
16064 return awsutil.Prettify(s)
16065}
16066
16067// GoString returns the string representation
16068func (s PutBucketReplicationInput) GoString() string {
16069 return s.String()
16070}
16071
16072// Validate inspects the fields of the type to determine if they are valid.
16073func (s *PutBucketReplicationInput) Validate() error {
16074 invalidParams := request.ErrInvalidParams{Context: "PutBucketReplicationInput"}
16075 if s.Bucket == nil {
16076 invalidParams.Add(request.NewErrParamRequired("Bucket"))
16077 }
16078 if s.ReplicationConfiguration == nil {
16079 invalidParams.Add(request.NewErrParamRequired("ReplicationConfiguration"))
16080 }
16081 if s.ReplicationConfiguration != nil {
16082 if err := s.ReplicationConfiguration.Validate(); err != nil {
16083 invalidParams.AddNested("ReplicationConfiguration", err.(request.ErrInvalidParams))
16084 }
16085 }
16086
16087 if invalidParams.Len() > 0 {
16088 return invalidParams
16089 }
16090 return nil
16091}
16092
16093// SetBucket sets the Bucket field's value.
16094func (s *PutBucketReplicationInput) SetBucket(v string) *PutBucketReplicationInput {
16095 s.Bucket = &v
16096 return s
16097}
16098
16099// SetReplicationConfiguration sets the ReplicationConfiguration field's value.
16100func (s *PutBucketReplicationInput) SetReplicationConfiguration(v *ReplicationConfiguration) *PutBucketReplicationInput {
16101 s.ReplicationConfiguration = v
16102 return s
16103}
16104
16105// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketReplicationOutput
16106type PutBucketReplicationOutput struct {
16107 _ struct{} `type:"structure"`
16108}
16109
16110// String returns the string representation
16111func (s PutBucketReplicationOutput) String() string {
16112 return awsutil.Prettify(s)
16113}
16114
16115// GoString returns the string representation
16116func (s PutBucketReplicationOutput) GoString() string {
16117 return s.String()
16118}
16119
16120// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketRequestPaymentRequest
16121type PutBucketRequestPaymentInput struct {
16122 _ struct{} `type:"structure" payload:"RequestPaymentConfiguration"`
16123
16124 // Bucket is a required field
16125 Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
16126
16127 // RequestPaymentConfiguration is a required field
16128 RequestPaymentConfiguration *RequestPaymentConfiguration `locationName:"RequestPaymentConfiguration" type:"structure" required:"true"`
16129}
16130
16131// String returns the string representation
16132func (s PutBucketRequestPaymentInput) String() string {
16133 return awsutil.Prettify(s)
16134}
16135
16136// GoString returns the string representation
16137func (s PutBucketRequestPaymentInput) GoString() string {
16138 return s.String()
16139}
16140
16141// Validate inspects the fields of the type to determine if they are valid.
16142func (s *PutBucketRequestPaymentInput) Validate() error {
16143 invalidParams := request.ErrInvalidParams{Context: "PutBucketRequestPaymentInput"}
16144 if s.Bucket == nil {
16145 invalidParams.Add(request.NewErrParamRequired("Bucket"))
16146 }
16147 if s.RequestPaymentConfiguration == nil {
16148 invalidParams.Add(request.NewErrParamRequired("RequestPaymentConfiguration"))
16149 }
16150 if s.RequestPaymentConfiguration != nil {
16151 if err := s.RequestPaymentConfiguration.Validate(); err != nil {
16152 invalidParams.AddNested("RequestPaymentConfiguration", err.(request.ErrInvalidParams))
16153 }
16154 }
16155
16156 if invalidParams.Len() > 0 {
16157 return invalidParams
16158 }
16159 return nil
16160}
16161
16162// SetBucket sets the Bucket field's value.
16163func (s *PutBucketRequestPaymentInput) SetBucket(v string) *PutBucketRequestPaymentInput {
16164 s.Bucket = &v
16165 return s
16166}
16167
16168// SetRequestPaymentConfiguration sets the RequestPaymentConfiguration field's value.
16169func (s *PutBucketRequestPaymentInput) SetRequestPaymentConfiguration(v *RequestPaymentConfiguration) *PutBucketRequestPaymentInput {
16170 s.RequestPaymentConfiguration = v
16171 return s
16172}
16173
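// Editorial sketch: switching a bucket to requester-pays. "Requester" is one
// of the two documented Payer values (the other being "BucketOwner"); the
// bucket name is a placeholder.
//
//	input := (&PutBucketRequestPaymentInput{}).
//		SetBucket("example-bucket").
//		SetRequestPaymentConfiguration((&RequestPaymentConfiguration{}).
//			SetPayer("Requester"))
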
16174// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketRequestPaymentOutput
16175type PutBucketRequestPaymentOutput struct {
16176 _ struct{} `type:"structure"`
16177}
16178
16179// String returns the string representation
16180func (s PutBucketRequestPaymentOutput) String() string {
16181 return awsutil.Prettify(s)
16182}
16183
16184// GoString returns the string representation
16185func (s PutBucketRequestPaymentOutput) GoString() string {
16186 return s.String()
16187}
16188
16189// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketTaggingRequest
16190type PutBucketTaggingInput struct {
16191 _ struct{} `type:"structure" payload:"Tagging"`
16192
16193 // Bucket is a required field
16194 Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
16195
16196 // Tagging is a required field
16197 Tagging *Tagging `locationName:"Tagging" type:"structure" required:"true"`
16198}
16199
16200// String returns the string representation
16201func (s PutBucketTaggingInput) String() string {
16202 return awsutil.Prettify(s)
16203}
16204
16205// GoString returns the string representation
16206func (s PutBucketTaggingInput) GoString() string {
16207 return s.String()
16208}
16209
16210// Validate inspects the fields of the type to determine if they are valid.
16211func (s *PutBucketTaggingInput) Validate() error {
16212 invalidParams := request.ErrInvalidParams{Context: "PutBucketTaggingInput"}
16213 if s.Bucket == nil {
16214 invalidParams.Add(request.NewErrParamRequired("Bucket"))
16215 }
16216 if s.Tagging == nil {
16217 invalidParams.Add(request.NewErrParamRequired("Tagging"))
16218 }
16219 if s.Tagging != nil {
16220 if err := s.Tagging.Validate(); err != nil {
16221 invalidParams.AddNested("Tagging", err.(request.ErrInvalidParams))
16222 }
16223 }
16224
16225 if invalidParams.Len() > 0 {
16226 return invalidParams
16227 }
16228 return nil
16229}
16230
16231// SetBucket sets the Bucket field's value.
16232func (s *PutBucketTaggingInput) SetBucket(v string) *PutBucketTaggingInput {
16233 s.Bucket = &v
16234 return s
16235}
16236
16237// SetTagging sets the Tagging field's value.
16238func (s *PutBucketTaggingInput) SetTagging(v *Tagging) *PutBucketTaggingInput {
16239 s.Tagging = v
16240 return s
16241}
16242
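// Editorial sketch: tagging a bucket with a single key/value pair, assuming
// the Tag and Tagging types defined elsewhere in this file. The names and
// values are placeholders.
//
//	input := (&PutBucketTaggingInput{}).
//		SetBucket("example-bucket").
//		SetTagging((&Tagging{}).SetTagSet([]*Tag{
//			(&Tag{}).SetKey("env").SetValue("prod"),
//		}))
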
16243// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketTaggingOutput
16244type PutBucketTaggingOutput struct {
16245 _ struct{} `type:"structure"`
16246}
16247
16248// String returns the string representation
16249func (s PutBucketTaggingOutput) String() string {
16250 return awsutil.Prettify(s)
16251}
16252
16253// GoString returns the string representation
16254func (s PutBucketTaggingOutput) GoString() string {
16255 return s.String()
16256}
16257
16258// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketVersioningRequest
16259type PutBucketVersioningInput struct {
16260 _ struct{} `type:"structure" payload:"VersioningConfiguration"`
16261
16262 // Bucket is a required field
16263 Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
16264
16265 // The concatenation of the authentication device's serial number, a space,
16266 // and the value that is displayed on your authentication device.
16267 MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"`
16268
16269 // VersioningConfiguration is a required field
16270 VersioningConfiguration *VersioningConfiguration `locationName:"VersioningConfiguration" type:"structure" required:"true"`
16271}
16272
16273// String returns the string representation
16274func (s PutBucketVersioningInput) String() string {
16275 return awsutil.Prettify(s)
16276}
16277
16278// GoString returns the string representation
16279func (s PutBucketVersioningInput) GoString() string {
16280 return s.String()
16281}
16282
16283// Validate inspects the fields of the type to determine if they are valid.
16284func (s *PutBucketVersioningInput) Validate() error {
16285 invalidParams := request.ErrInvalidParams{Context: "PutBucketVersioningInput"}
16286 if s.Bucket == nil {
16287 invalidParams.Add(request.NewErrParamRequired("Bucket"))
16288 }
16289 if s.VersioningConfiguration == nil {
16290 invalidParams.Add(request.NewErrParamRequired("VersioningConfiguration"))
16291 }
16292
16293 if invalidParams.Len() > 0 {
16294 return invalidParams
16295 }
16296 return nil
16297}
16298
16299// SetBucket sets the Bucket field's value.
16300func (s *PutBucketVersioningInput) SetBucket(v string) *PutBucketVersioningInput {
16301 s.Bucket = &v
16302 return s
16303}
16304
16305// SetMFA sets the MFA field's value.
16306func (s *PutBucketVersioningInput) SetMFA(v string) *PutBucketVersioningInput {
16307 s.MFA = &v
16308 return s
16309}
16310
16311// SetVersioningConfiguration sets the VersioningConfiguration field's value.
16312func (s *PutBucketVersioningInput) SetVersioningConfiguration(v *VersioningConfiguration) *PutBucketVersioningInput {
16313 s.VersioningConfiguration = v
16314 return s
16315}
16316
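// Editorial sketch: enabling versioning. "Enabled" and "Suspended" are the
// documented versioning status values; the bucket name is a placeholder, and
// the MFA header is only needed when MFA delete is involved.
//
//	input := (&PutBucketVersioningInput{}).
//		SetBucket("example-bucket").
//		SetVersioningConfiguration((&VersioningConfiguration{}).
//			SetStatus("Enabled"))
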
16317// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketVersioningOutput
16318type PutBucketVersioningOutput struct {
16319 _ struct{} `type:"structure"`
16320}
16321
16322// String returns the string representation
16323func (s PutBucketVersioningOutput) String() string {
16324 return awsutil.Prettify(s)
16325}
16326
16327// GoString returns the string representation
16328func (s PutBucketVersioningOutput) GoString() string {
16329 return s.String()
16330}
16331
16332// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketWebsiteRequest
16333type PutBucketWebsiteInput struct {
16334 _ struct{} `type:"structure" payload:"WebsiteConfiguration"`
16335
16336 // Bucket is a required field
16337 Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
16338
16339 // WebsiteConfiguration is a required field
16340 WebsiteConfiguration *WebsiteConfiguration `locationName:"WebsiteConfiguration" type:"structure" required:"true"`
16341}
16342
16343// String returns the string representation
16344func (s PutBucketWebsiteInput) String() string {
16345 return awsutil.Prettify(s)
16346}
16347
16348// GoString returns the string representation
16349func (s PutBucketWebsiteInput) GoString() string {
16350 return s.String()
16351}
16352
16353// Validate inspects the fields of the type to determine if they are valid.
16354func (s *PutBucketWebsiteInput) Validate() error {
16355 invalidParams := request.ErrInvalidParams{Context: "PutBucketWebsiteInput"}
16356 if s.Bucket == nil {
16357 invalidParams.Add(request.NewErrParamRequired("Bucket"))
16358 }
16359 if s.WebsiteConfiguration == nil {
16360 invalidParams.Add(request.NewErrParamRequired("WebsiteConfiguration"))
16361 }
16362 if s.WebsiteConfiguration != nil {
16363 if err := s.WebsiteConfiguration.Validate(); err != nil {
16364 invalidParams.AddNested("WebsiteConfiguration", err.(request.ErrInvalidParams))
16365 }
16366 }
16367
16368 if invalidParams.Len() > 0 {
16369 return invalidParams
16370 }
16371 return nil
16372}
16373
16374// SetBucket sets the Bucket field's value.
16375func (s *PutBucketWebsiteInput) SetBucket(v string) *PutBucketWebsiteInput {
16376 s.Bucket = &v
16377 return s
16378}
16379
16380// SetWebsiteConfiguration sets the WebsiteConfiguration field's value.
16381func (s *PutBucketWebsiteInput) SetWebsiteConfiguration(v *WebsiteConfiguration) *PutBucketWebsiteInput {
16382 s.WebsiteConfiguration = v
16383 return s
16384}
16385
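// Editorial sketch: serving a static website from a bucket, assuming the
// WebsiteConfiguration, IndexDocument, and ErrorDocument types defined
// elsewhere in this file. The object keys are placeholders.
//
//	input := (&PutBucketWebsiteInput{}).
//		SetBucket("example-bucket").
//		SetWebsiteConfiguration((&WebsiteConfiguration{}).
//			SetIndexDocument((&IndexDocument{}).SetSuffix("index.html")).
//			SetErrorDocument((&ErrorDocument{}).SetKey("error.html")))
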
16386// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketWebsiteOutput
16387type PutBucketWebsiteOutput struct {
16388 _ struct{} `type:"structure"`
16389}
16390
16391// String returns the string representation
16392func (s PutBucketWebsiteOutput) String() string {
16393 return awsutil.Prettify(s)
16394}
16395
16396// GoString returns the string representation
16397func (s PutBucketWebsiteOutput) GoString() string {
16398 return s.String()
16399}
16400
16401// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAclRequest
16402type PutObjectAclInput struct {
16403 _ struct{} `type:"structure" payload:"AccessControlPolicy"`
16404
16405 // The canned ACL to apply to the object.
16406 ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"`
16407
16408 AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure"`
16409
16410 // Bucket is a required field
16411 Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
16412
16413 // Allows grantee the read, write, read ACP, and write ACP permissions on the
16414 // object.
16415 GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`
16416
16417 // Allows grantee to read the object data and its metadata.
16418 GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"`
16419
16420 // Allows grantee to read the object ACL.
16421 GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"`
16422
16423 // Allows grantee to create, overwrite, and delete any object in the bucket.
16424 GrantWrite *string `location:"header" locationName:"x-amz-grant-write" type:"string"`
16425
16426 // Allows grantee to write the ACL for the applicable object.
16427 GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
16428
16429 // Key is a required field
16430 Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
16431
16432 // Confirms that the requester knows that she or he will be charged for the
16433 // request. Bucket owners need not specify this parameter in their requests.
16434 // Documentation on downloading objects from requester pays buckets can be found
16435 // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
16436 RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
16437
16438 // VersionId used to reference a specific version of the object.
16439 VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
16440}
16441
16442// String returns the string representation
16443func (s PutObjectAclInput) String() string {
16444 return awsutil.Prettify(s)
16445}
16446
16447// GoString returns the string representation
16448func (s PutObjectAclInput) GoString() string {
16449 return s.String()
16450}
16451
16452// Validate inspects the fields of the type to determine if they are valid.
16453func (s *PutObjectAclInput) Validate() error {
16454 invalidParams := request.ErrInvalidParams{Context: "PutObjectAclInput"}
16455 if s.Bucket == nil {
16456 invalidParams.Add(request.NewErrParamRequired("Bucket"))
16457 }
16458 if s.Key == nil {
16459 invalidParams.Add(request.NewErrParamRequired("Key"))
16460 }
16461 if s.Key != nil && len(*s.Key) < 1 {
16462 invalidParams.Add(request.NewErrParamMinLen("Key", 1))
16463 }
16464 if s.AccessControlPolicy != nil {
16465 if err := s.AccessControlPolicy.Validate(); err != nil {
16466 invalidParams.AddNested("AccessControlPolicy", err.(request.ErrInvalidParams))
16467 }
16468 }
16469
16470 if invalidParams.Len() > 0 {
16471 return invalidParams
16472 }
16473 return nil
16474}
16475
16476// SetACL sets the ACL field's value.
16477func (s *PutObjectAclInput) SetACL(v string) *PutObjectAclInput {
16478 s.ACL = &v
16479 return s
16480}
16481
16482// SetAccessControlPolicy sets the AccessControlPolicy field's value.
16483func (s *PutObjectAclInput) SetAccessControlPolicy(v *AccessControlPolicy) *PutObjectAclInput {
16484 s.AccessControlPolicy = v
16485 return s
16486}
16487
16488// SetBucket sets the Bucket field's value.
16489func (s *PutObjectAclInput) SetBucket(v string) *PutObjectAclInput {
16490 s.Bucket = &v
16491 return s
16492}
16493
16494// SetGrantFullControl sets the GrantFullControl field's value.
16495func (s *PutObjectAclInput) SetGrantFullControl(v string) *PutObjectAclInput {
16496 s.GrantFullControl = &v
16497 return s
16498}
16499
16500// SetGrantRead sets the GrantRead field's value.
16501func (s *PutObjectAclInput) SetGrantRead(v string) *PutObjectAclInput {
16502 s.GrantRead = &v
16503 return s
16504}
16505
16506// SetGrantReadACP sets the GrantReadACP field's value.
16507func (s *PutObjectAclInput) SetGrantReadACP(v string) *PutObjectAclInput {
16508 s.GrantReadACP = &v
16509 return s
16510}
16511
16512// SetGrantWrite sets the GrantWrite field's value.
16513func (s *PutObjectAclInput) SetGrantWrite(v string) *PutObjectAclInput {
16514 s.GrantWrite = &v
16515 return s
16516}
16517
16518// SetGrantWriteACP sets the GrantWriteACP field's value.
16519func (s *PutObjectAclInput) SetGrantWriteACP(v string) *PutObjectAclInput {
16520 s.GrantWriteACP = &v
16521 return s
16522}
16523
16524// SetKey sets the Key field's value.
16525func (s *PutObjectAclInput) SetKey(v string) *PutObjectAclInput {
16526 s.Key = &v
16527 return s
16528}
16529
16530// SetRequestPayer sets the RequestPayer field's value.
16531func (s *PutObjectAclInput) SetRequestPayer(v string) *PutObjectAclInput {
16532 s.RequestPayer = &v
16533 return s
16534}
16535
16536// SetVersionId sets the VersionId field's value.
16537func (s *PutObjectAclInput) SetVersionId(v string) *PutObjectAclInput {
16538 s.VersionId = &v
16539 return s
16540}
16541
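// Editorial sketch: applying the canned "public-read" ACL to one object. A
// canned ACL and an explicit AccessControlPolicy are alternatives; the bucket
// and key are placeholders.
//
//	input := (&PutObjectAclInput{}).
//		SetBucket("example-bucket").
//		SetKey("docs/hello.txt").
//		SetACL("public-read")
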
16542// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAclOutput
16543type PutObjectAclOutput struct {
16544 _ struct{} `type:"structure"`
16545
16546 // If present, indicates that the requester was successfully charged for the
16547 // request.
16548 RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
16549}
16550
16551// String returns the string representation
16552func (s PutObjectAclOutput) String() string {
16553 return awsutil.Prettify(s)
16554}
16555
16556// GoString returns the string representation
16557func (s PutObjectAclOutput) GoString() string {
16558 return s.String()
16559}
16560
16561// SetRequestCharged sets the RequestCharged field's value.
16562func (s *PutObjectAclOutput) SetRequestCharged(v string) *PutObjectAclOutput {
16563 s.RequestCharged = &v
16564 return s
16565}
16566
16567// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectRequest
16568type PutObjectInput struct {
16569 _ struct{} `type:"structure" payload:"Body"`
16570
16571 // The canned ACL to apply to the object.
16572 ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"`
16573
16574 // Object data.
16575 Body io.ReadSeeker `type:"blob"`
16576
16577 // Name of the bucket to which the PUT operation was initiated.
16578 //
16579 // Bucket is a required field
16580 Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
16581
16582 // Specifies caching behavior along the request/reply chain.
16583 CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"`
16584
16585 // Specifies presentational information for the object.
16586 ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"`
16587
16588 // Specifies what content encodings have been applied to the object and thus
16589 // what decoding mechanisms must be applied to obtain the media-type referenced
16590 // by the Content-Type header field.
16591 ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"`
16592
16593 // The language the content is in.
16594 ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"`
16595
16596 // Size of the body in bytes. This parameter is useful when the size of the
16597 // body cannot be determined automatically.
16598 ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"`
16599
16600 // A standard MIME type describing the format of the object data.
16601 ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
16602
16603 // The date and time at which the object is no longer cacheable.
16604 Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp" timestampFormat:"rfc822"`
16605
16606 // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.
16607 GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`
16608
16609 // Allows grantee to read the object data and its metadata.
16610 GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"`
16611
16612 // Allows grantee to read the object ACL.
16613 GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"`
16614
16615 // Allows grantee to write the ACL for the applicable object.
16616 GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
16617
16618 // Object key for which the PUT operation was initiated.
16619 //
16620 // Key is a required field
16621 Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
16622
16623 // A map of metadata to store with the object in S3.
16624 Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
16625
16626 // Confirms that the requester knows that she or he will be charged for the
16627 // request. Bucket owners need not specify this parameter in their requests.
16628 // Documentation on downloading objects from requester pays buckets can be found
16629 // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
16630 RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
16631
16632 // Specifies the algorithm to use when encrypting the object (e.g., AES256).
16633 SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
16634
16635 // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
16636 // data. This value is used to store the object and then it is discarded; Amazon
16637 // does not store the encryption key. The key must be appropriate for use with
16638 // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
16639 // header.
16640 SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"`
16641
16642 // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
16643 // Amazon S3 uses this header for a message integrity check to ensure the encryption
16644 // key was transmitted without error.
16645 SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
16646
16647 // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT
16648 // requests for an object protected by AWS KMS will fail if not made via SSL
16649 // or using SigV4. Documentation on configuring any of the officially supported
16650 // AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version
16651 SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
16652
16653 // The Server-side encryption algorithm used when storing this object in S3
16654 // (e.g., AES256, aws:kms).
16655 ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
16656
16657 // The type of storage to use for the object. Defaults to 'STANDARD'.
16658 StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"`
16659
16660 // The tag-set for the object. The tag-set must be encoded as URL query parameters.
16661 Tagging *string `location:"header" locationName:"x-amz-tagging" type:"string"`
16662
16663 // If the bucket is configured as a website, redirects requests for this object
16664 // to another object in the same bucket or to an external URL. Amazon S3 stores
16665 // the value of this header in the object metadata.
16666 WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"`
16667}
16668
16669// String returns the string representation
16670func (s PutObjectInput) String() string {
16671 return awsutil.Prettify(s)
16672}
16673
16674// GoString returns the string representation
16675func (s PutObjectInput) GoString() string {
16676 return s.String()
16677}
16678
16679// Validate inspects the fields of the type to determine if they are valid.
16680func (s *PutObjectInput) Validate() error {
16681 invalidParams := request.ErrInvalidParams{Context: "PutObjectInput"}
16682 if s.Bucket == nil {
16683 invalidParams.Add(request.NewErrParamRequired("Bucket"))
16684 }
16685 if s.Key == nil {
16686 invalidParams.Add(request.NewErrParamRequired("Key"))
16687 }
16688 if s.Key != nil && len(*s.Key) < 1 {
16689 invalidParams.Add(request.NewErrParamMinLen("Key", 1))
16690 }
16691
16692 if invalidParams.Len() > 0 {
16693 return invalidParams
16694 }
16695 return nil
16696}
16697
16698// SetACL sets the ACL field's value.
16699func (s *PutObjectInput) SetACL(v string) *PutObjectInput {
16700 s.ACL = &v
16701 return s
16702}
16703
16704// SetBody sets the Body field's value.
16705func (s *PutObjectInput) SetBody(v io.ReadSeeker) *PutObjectInput {
16706 s.Body = v
16707 return s
16708}
16709
16710// SetBucket sets the Bucket field's value.
16711func (s *PutObjectInput) SetBucket(v string) *PutObjectInput {
16712 s.Bucket = &v
16713 return s
16714}
16715
16716// SetCacheControl sets the CacheControl field's value.
16717func (s *PutObjectInput) SetCacheControl(v string) *PutObjectInput {
16718 s.CacheControl = &v
16719 return s
16720}
16721
16722// SetContentDisposition sets the ContentDisposition field's value.
16723func (s *PutObjectInput) SetContentDisposition(v string) *PutObjectInput {
16724 s.ContentDisposition = &v
16725 return s
16726}
16727
16728// SetContentEncoding sets the ContentEncoding field's value.
16729func (s *PutObjectInput) SetContentEncoding(v string) *PutObjectInput {
16730 s.ContentEncoding = &v
16731 return s
16732}
16733
16734// SetContentLanguage sets the ContentLanguage field's value.
16735func (s *PutObjectInput) SetContentLanguage(v string) *PutObjectInput {
16736 s.ContentLanguage = &v
16737 return s
16738}
16739
16740// SetContentLength sets the ContentLength field's value.
16741func (s *PutObjectInput) SetContentLength(v int64) *PutObjectInput {
16742 s.ContentLength = &v
16743 return s
16744}
16745
16746// SetContentType sets the ContentType field's value.
16747func (s *PutObjectInput) SetContentType(v string) *PutObjectInput {
16748 s.ContentType = &v
16749 return s
16750}
16751
16752// SetExpires sets the Expires field's value.
16753func (s *PutObjectInput) SetExpires(v time.Time) *PutObjectInput {
16754 s.Expires = &v
16755 return s
16756}
16757
16758// SetGrantFullControl sets the GrantFullControl field's value.
16759func (s *PutObjectInput) SetGrantFullControl(v string) *PutObjectInput {
16760 s.GrantFullControl = &v
16761 return s
16762}
16763
16764// SetGrantRead sets the GrantRead field's value.
16765func (s *PutObjectInput) SetGrantRead(v string) *PutObjectInput {
16766 s.GrantRead = &v
16767 return s
16768}
16769
16770// SetGrantReadACP sets the GrantReadACP field's value.
16771func (s *PutObjectInput) SetGrantReadACP(v string) *PutObjectInput {
16772 s.GrantReadACP = &v
16773 return s
16774}
16775
16776// SetGrantWriteACP sets the GrantWriteACP field's value.
16777func (s *PutObjectInput) SetGrantWriteACP(v string) *PutObjectInput {
16778 s.GrantWriteACP = &v
16779 return s
16780}
16781
16782// SetKey sets the Key field's value.
16783func (s *PutObjectInput) SetKey(v string) *PutObjectInput {
16784 s.Key = &v
16785 return s
16786}
16787
16788// SetMetadata sets the Metadata field's value.
16789func (s *PutObjectInput) SetMetadata(v map[string]*string) *PutObjectInput {
16790 s.Metadata = v
16791 return s
16792}
16793
16794// SetRequestPayer sets the RequestPayer field's value.
16795func (s *PutObjectInput) SetRequestPayer(v string) *PutObjectInput {
16796 s.RequestPayer = &v
16797 return s
16798}
16799
16800// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value.
16801func (s *PutObjectInput) SetSSECustomerAlgorithm(v string) *PutObjectInput {
16802 s.SSECustomerAlgorithm = &v
16803 return s
16804}
16805
16806// SetSSECustomerKey sets the SSECustomerKey field's value.
16807func (s *PutObjectInput) SetSSECustomerKey(v string) *PutObjectInput {
16808 s.SSECustomerKey = &v
16809 return s
16810}
16811
16812// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
16813func (s *PutObjectInput) SetSSECustomerKeyMD5(v string) *PutObjectInput {
16814 s.SSECustomerKeyMD5 = &v
16815 return s
16816}
16817
16818// SetSSEKMSKeyId sets the SSEKMSKeyId field's value.
16819func (s *PutObjectInput) SetSSEKMSKeyId(v string) *PutObjectInput {
16820 s.SSEKMSKeyId = &v
16821 return s
16822}
16823
16824// SetServerSideEncryption sets the ServerSideEncryption field's value.
16825func (s *PutObjectInput) SetServerSideEncryption(v string) *PutObjectInput {
16826 s.ServerSideEncryption = &v
16827 return s
16828}
16829
16830// SetStorageClass sets the StorageClass field's value.
16831func (s *PutObjectInput) SetStorageClass(v string) *PutObjectInput {
16832 s.StorageClass = &v
16833 return s
16834}
16835
16836// SetTagging sets the Tagging field's value.
16837func (s *PutObjectInput) SetTagging(v string) *PutObjectInput {
16838 s.Tagging = &v
16839 return s
16840}
16841
16842// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value.
16843func (s *PutObjectInput) SetWebsiteRedirectLocation(v string) *PutObjectInput {
16844 s.WebsiteRedirectLocation = &v
16845 return s
16846}
16847
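// Editorial sketch: uploading a small object. Body accepts any io.ReadSeeker,
// so bytes.NewReader works for in-memory data; the bucket, key, and content
// are placeholders.
//
//	input := (&PutObjectInput{}).
//		SetBucket("example-bucket").
//		SetKey("docs/hello.txt").
//		SetBody(bytes.NewReader([]byte("hello world"))).
//		SetContentType("text/plain")
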
16848// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectOutput
16849type PutObjectOutput struct {
16850 _ struct{} `type:"structure"`
16851
16852 // Entity tag for the uploaded object.
16853 ETag *string `location:"header" locationName:"ETag" type:"string"`
16854
16855 // If the object expiration is configured, this will contain the expiration
16856 // date (expiry-date) and rule ID (rule-id). The value of rule-id is URL encoded.
16857 Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"`
16858
16859 // If present, indicates that the requester was successfully charged for the
16860 // request.
16861 RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
16862
16863 // If server-side encryption with a customer-provided encryption key was requested,
16864 // the response will include this header confirming the encryption algorithm
16865 // used.
16866 SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
16867
16868 // If server-side encryption with a customer-provided encryption key was requested,
16869 // the response will include this header to provide round trip message integrity
16870 // verification of the customer-provided encryption key.
16871 SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
16872
16873 // If present, specifies the ID of the AWS Key Management Service (KMS) master
16874 // encryption key that was used for the object.
16875 SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
16876
16877 // The Server-side encryption algorithm used when storing this object in S3
16878 // (e.g., AES256, aws:kms).
16879 ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
16880
16881 // Version of the object.
16882 VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"`
16883}
16884
16885// String returns the string representation
16886func (s PutObjectOutput) String() string {
16887 return awsutil.Prettify(s)
16888}
16889
16890// GoString returns the string representation
16891func (s PutObjectOutput) GoString() string {
16892 return s.String()
16893}
16894
16895// SetETag sets the ETag field's value.
16896func (s *PutObjectOutput) SetETag(v string) *PutObjectOutput {
16897 s.ETag = &v
16898 return s
16899}
16900
16901// SetExpiration sets the Expiration field's value.
16902func (s *PutObjectOutput) SetExpiration(v string) *PutObjectOutput {
16903 s.Expiration = &v
16904 return s
16905}
16906
16907// SetRequestCharged sets the RequestCharged field's value.
16908func (s *PutObjectOutput) SetRequestCharged(v string) *PutObjectOutput {
16909 s.RequestCharged = &v
16910 return s
16911}
16912
16913// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value.
16914func (s *PutObjectOutput) SetSSECustomerAlgorithm(v string) *PutObjectOutput {
16915 s.SSECustomerAlgorithm = &v
16916 return s
16917}
16918
16919// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
16920func (s *PutObjectOutput) SetSSECustomerKeyMD5(v string) *PutObjectOutput {
16921 s.SSECustomerKeyMD5 = &v
16922 return s
16923}
16924
16925// SetSSEKMSKeyId sets the SSEKMSKeyId field's value.
16926func (s *PutObjectOutput) SetSSEKMSKeyId(v string) *PutObjectOutput {
16927 s.SSEKMSKeyId = &v
16928 return s
16929}
16930
16931// SetServerSideEncryption sets the ServerSideEncryption field's value.
16932func (s *PutObjectOutput) SetServerSideEncryption(v string) *PutObjectOutput {
16933 s.ServerSideEncryption = &v
16934 return s
16935}
16936
16937// SetVersionId sets the VersionId field's value.
16938func (s *PutObjectOutput) SetVersionId(v string) *PutObjectOutput {
16939 s.VersionId = &v
16940 return s
16941}
16942
16943// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTaggingRequest
16944type PutObjectTaggingInput struct {
16945 _ struct{} `type:"structure" payload:"Tagging"`
16946
16947 // Bucket is a required field
16948 Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
16949
16950 // Key is a required field
16951 Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
16952
16953 // Tagging is a required field
16954 Tagging *Tagging `locationName:"Tagging" type:"structure" required:"true"`
16955
16956 VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
16957}
16958
16959// String returns the string representation
16960func (s PutObjectTaggingInput) String() string {
16961 return awsutil.Prettify(s)
16962}
16963
16964// GoString returns the string representation
16965func (s PutObjectTaggingInput) GoString() string {
16966 return s.String()
16967}
16968
16969// Validate inspects the fields of the type to determine if they are valid.
16970func (s *PutObjectTaggingInput) Validate() error {
16971 invalidParams := request.ErrInvalidParams{Context: "PutObjectTaggingInput"}
16972 if s.Bucket == nil {
16973 invalidParams.Add(request.NewErrParamRequired("Bucket"))
16974 }
16975 if s.Key == nil {
16976 invalidParams.Add(request.NewErrParamRequired("Key"))
16977 }
16978 if s.Key != nil && len(*s.Key) < 1 {
16979 invalidParams.Add(request.NewErrParamMinLen("Key", 1))
16980 }
16981 if s.Tagging == nil {
16982 invalidParams.Add(request.NewErrParamRequired("Tagging"))
16983 }
16984 if s.Tagging != nil {
16985 if err := s.Tagging.Validate(); err != nil {
16986 invalidParams.AddNested("Tagging", err.(request.ErrInvalidParams))
16987 }
16988 }
16989
16990 if invalidParams.Len() > 0 {
16991 return invalidParams
16992 }
16993 return nil
16994}
16995
16996// SetBucket sets the Bucket field's value.
16997func (s *PutObjectTaggingInput) SetBucket(v string) *PutObjectTaggingInput {
16998 s.Bucket = &v
16999 return s
17000}
17001
17002// SetKey sets the Key field's value.
17003func (s *PutObjectTaggingInput) SetKey(v string) *PutObjectTaggingInput {
17004 s.Key = &v
17005 return s
17006}
17007
17008// SetTagging sets the Tagging field's value.
17009func (s *PutObjectTaggingInput) SetTagging(v *Tagging) *PutObjectTaggingInput {
17010 s.Tagging = v
17011 return s
17012}
17013
17014// SetVersionId sets the VersionId field's value.
17015func (s *PutObjectTaggingInput) SetVersionId(v string) *PutObjectTaggingInput {
17016 s.VersionId = &v
17017 return s
17018}
17019
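// Editorial sketch: replacing the tag-set of an existing object. Unlike the
// x-amz-tagging header on PutObject, this operation takes a structured
// Tagging payload; the names here are placeholders.
//
//	input := (&PutObjectTaggingInput{}).
//		SetBucket("example-bucket").
//		SetKey("docs/hello.txt").
//		SetTagging((&Tagging{}).SetTagSet([]*Tag{
//			(&Tag{}).SetKey("classification").SetValue("public"),
//		}))
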
17020// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTaggingOutput
17021type PutObjectTaggingOutput struct {
17022 _ struct{} `type:"structure"`
17023
17024 VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"`
17025}
17026
17027// String returns the string representation
17028func (s PutObjectTaggingOutput) String() string {
17029 return awsutil.Prettify(s)
17030}
17031
17032// GoString returns the string representation
17033func (s PutObjectTaggingOutput) GoString() string {
17034 return s.String()
17035}
17036
17037// SetVersionId sets the VersionId field's value.
17038func (s *PutObjectTaggingOutput) SetVersionId(v string) *PutObjectTaggingOutput {
17039 s.VersionId = &v
17040 return s
17041}
17042
17043 // Container for specifying a configuration when you want Amazon S3 to publish
17044// events to an Amazon Simple Queue Service (Amazon SQS) queue.
17045// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/QueueConfiguration
17046type QueueConfiguration struct {
17047 _ struct{} `type:"structure"`
17048
17049 // Events is a required field
17050 Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"`
17051
17052 // Container for object key name filtering rules. For information about key
17053 // name filtering, go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
17054 Filter *NotificationConfigurationFilter `type:"structure"`
17055
17056 // Optional unique identifier for configurations in a notification configuration.
17057 // If you don't provide one, Amazon S3 will assign an ID.
17058 Id *string `type:"string"`
17059
17060 // Amazon SQS queue ARN to which Amazon S3 will publish a message when it detects
17061 // events of the specified type.
17062 //
17063 // QueueArn is a required field
17064 QueueArn *string `locationName:"Queue" type:"string" required:"true"`
17065}
17066
17067// String returns the string representation
17068func (s QueueConfiguration) String() string {
17069 return awsutil.Prettify(s)
17070}
17071
17072// GoString returns the string representation
17073func (s QueueConfiguration) GoString() string {
17074 return s.String()
17075}
17076
17077// Validate inspects the fields of the type to determine if they are valid.
17078func (s *QueueConfiguration) Validate() error {
17079 invalidParams := request.ErrInvalidParams{Context: "QueueConfiguration"}
17080 if s.Events == nil {
17081 invalidParams.Add(request.NewErrParamRequired("Events"))
17082 }
17083 if s.QueueArn == nil {
17084 invalidParams.Add(request.NewErrParamRequired("QueueArn"))
17085 }
17086
17087 if invalidParams.Len() > 0 {
17088 return invalidParams
17089 }
17090 return nil
17091}
17092
17093// SetEvents sets the Events field's value.
17094func (s *QueueConfiguration) SetEvents(v []*string) *QueueConfiguration {
17095 s.Events = v
17096 return s
17097}
17098
17099// SetFilter sets the Filter field's value.
17100func (s *QueueConfiguration) SetFilter(v *NotificationConfigurationFilter) *QueueConfiguration {
17101 s.Filter = v
17102 return s
17103}
17104
17105// SetId sets the Id field's value.
17106func (s *QueueConfiguration) SetId(v string) *QueueConfiguration {
17107 s.Id = &v
17108 return s
17109}
17110
17111// SetQueueArn sets the QueueArn field's value.
17112func (s *QueueConfiguration) SetQueueArn(v string) *QueueConfiguration {
17113 s.QueueArn = &v
17114 return s
17115}
17116
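// Editorial sketch: Validate collects every missing required field into one
// error, illustrated here with a placeholder ARN and no Events set.
//
//	qc := (&QueueConfiguration{}).
//		SetQueueArn("arn:aws:sqs:us-east-1:123456789012:example-queue")
//	err := qc.Validate() // non-nil: the required Events field is unset
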
17117// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/QueueConfigurationDeprecated
17118type QueueConfigurationDeprecated struct {
17119 _ struct{} `type:"structure"`
17120
17121 // Bucket event for which to send notifications.
17122 Event *string `deprecated:"true" type:"string" enum:"Event"`
17123
17124 Events []*string `locationName:"Event" type:"list" flattened:"true"`
17125
17126 // Optional unique identifier for configurations in a notification configuration.
17127 // If you don't provide one, Amazon S3 will assign an ID.
17128 Id *string `type:"string"`
17129
17130 Queue *string `type:"string"`
17131}
17132
17133// String returns the string representation
17134func (s QueueConfigurationDeprecated) String() string {
17135 return awsutil.Prettify(s)
17136}
17137
17138// GoString returns the string representation
17139func (s QueueConfigurationDeprecated) GoString() string {
17140 return s.String()
17141}
17142
17143// SetEvent sets the Event field's value.
17144func (s *QueueConfigurationDeprecated) SetEvent(v string) *QueueConfigurationDeprecated {
17145 s.Event = &v
17146 return s
17147}
17148
17149// SetEvents sets the Events field's value.
17150func (s *QueueConfigurationDeprecated) SetEvents(v []*string) *QueueConfigurationDeprecated {
17151 s.Events = v
17152 return s
17153}
17154
17155// SetId sets the Id field's value.
17156func (s *QueueConfigurationDeprecated) SetId(v string) *QueueConfigurationDeprecated {
17157 s.Id = &v
17158 return s
17159}
17160
17161// SetQueue sets the Queue field's value.
17162func (s *QueueConfigurationDeprecated) SetQueue(v string) *QueueConfigurationDeprecated {
17163 s.Queue = &v
17164 return s
17165}
17166
17167// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Redirect
17168type Redirect struct {
17169 _ struct{} `type:"structure"`
17170
17171 // The host name to use in the redirect request.
17172 HostName *string `type:"string"`
17173
17174 // The HTTP redirect code to use on the response. Not required if one of the
17175 // siblings is present.
17176 HttpRedirectCode *string `type:"string"`
17177
17178 // Protocol to use (http, https) when redirecting requests. The default is the
17179 // protocol that is used in the original request.
17180 Protocol *string `type:"string" enum:"Protocol"`
17181
17182 // The object key prefix to use in the redirect request. For example, to redirect
17183 // requests for all pages with prefix docs/ (objects in the docs/ folder) to
17184 // documents/, you can set a condition block with KeyPrefixEquals set to docs/
17185 // and in the Redirect set ReplaceKeyPrefixWith to /documents. Not required
17186 // if one of the siblings is present. Can be present only if ReplaceKeyWith
17187 // is not provided.
17188 ReplaceKeyPrefixWith *string `type:"string"`
17189
17190 // The specific object key to use in the redirect request. For example, redirect
17191 // request to error.html. Not required if one of the siblings is present. Can
17192 // be present only if ReplaceKeyPrefixWith is not provided.
17193 ReplaceKeyWith *string `type:"string"`
17194}
17195
17196// String returns the string representation
17197func (s Redirect) String() string {
17198 return awsutil.Prettify(s)
17199}
17200
17201// GoString returns the string representation
17202func (s Redirect) GoString() string {
17203 return s.String()
17204}
17205
17206// SetHostName sets the HostName field's value.
17207func (s *Redirect) SetHostName(v string) *Redirect {
17208 s.HostName = &v
17209 return s
17210}
17211
17212// SetHttpRedirectCode sets the HttpRedirectCode field's value.
17213func (s *Redirect) SetHttpRedirectCode(v string) *Redirect {
17214 s.HttpRedirectCode = &v
17215 return s
17216}
17217
17218// SetProtocol sets the Protocol field's value.
17219func (s *Redirect) SetProtocol(v string) *Redirect {
17220 s.Protocol = &v
17221 return s
17222}
17223
17224// SetReplaceKeyPrefixWith sets the ReplaceKeyPrefixWith field's value.
17225func (s *Redirect) SetReplaceKeyPrefixWith(v string) *Redirect {
17226 s.ReplaceKeyPrefixWith = &v
17227 return s
17228}
17229
17230// SetReplaceKeyWith sets the ReplaceKeyWith field's value.
17231func (s *Redirect) SetReplaceKeyWith(v string) *Redirect {
17232 s.ReplaceKeyWith = &v
17233 return s
17234}
17235
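// Editorial sketch of the docs/ -> /documents example described above,
// assuming the RoutingRule and Condition types defined elsewhere in this
// file.
//
//	rule := (&RoutingRule{}).
//		SetCondition((&Condition{}).SetKeyPrefixEquals("docs/")).
//		SetRedirect((&Redirect{}).SetReplaceKeyPrefixWith("/documents"))
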
17236// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RedirectAllRequestsTo
17237type RedirectAllRequestsTo struct {
17238 _ struct{} `type:"structure"`
17239
17240 // Name of the host where requests will be redirected.
17241 //
17242 // HostName is a required field
17243 HostName *string `type:"string" required:"true"`
17244
17245 // Protocol to use (http, https) when redirecting requests. The default is the
17246 // protocol that is used in the original request.
17247 Protocol *string `type:"string" enum:"Protocol"`
17248}
17249
17250// String returns the string representation
17251func (s RedirectAllRequestsTo) String() string {
17252 return awsutil.Prettify(s)
17253}
17254
17255// GoString returns the string representation
17256func (s RedirectAllRequestsTo) GoString() string {
17257 return s.String()
17258}
17259
17260// Validate inspects the fields of the type to determine if they are valid.
17261func (s *RedirectAllRequestsTo) Validate() error {
17262 invalidParams := request.ErrInvalidParams{Context: "RedirectAllRequestsTo"}
17263 if s.HostName == nil {
17264 invalidParams.Add(request.NewErrParamRequired("HostName"))
17265 }
17266
17267 if invalidParams.Len() > 0 {
17268 return invalidParams
17269 }
17270 return nil
17271}
17272
17273// SetHostName sets the HostName field's value.
17274func (s *RedirectAllRequestsTo) SetHostName(v string) *RedirectAllRequestsTo {
17275 s.HostName = &v
17276 return s
17277}
17278
17279// SetProtocol sets the Protocol field's value.
17280func (s *RedirectAllRequestsTo) SetProtocol(v string) *RedirectAllRequestsTo {
17281 s.Protocol = &v
17282 return s
17283}
17284
17285// Container for replication rules. You can add as many as 1,000 rules. Total
17286// replication configuration size can be up to 2 MB.
17287// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ReplicationConfiguration
17288type ReplicationConfiguration struct {
17289 _ struct{} `type:"structure"`
17290
17291 // Amazon Resource Name (ARN) of an IAM role for Amazon S3 to assume when replicating
17292 // the objects.
17293 //
17294 // Role is a required field
17295 Role *string `type:"string" required:"true"`
17296
17297 // Container for information about a particular replication rule. Replication
17298 // configuration must have at least one rule and can contain up to 1,000 rules.
17299 //
17300 // Rules is a required field
17301 Rules []*ReplicationRule `locationName:"Rule" type:"list" flattened:"true" required:"true"`
17302}
17303
17304// String returns the string representation
17305func (s ReplicationConfiguration) String() string {
17306 return awsutil.Prettify(s)
17307}
17308
17309// GoString returns the string representation
17310func (s ReplicationConfiguration) GoString() string {
17311 return s.String()
17312}
17313
17314// Validate inspects the fields of the type to determine if they are valid.
17315func (s *ReplicationConfiguration) Validate() error {
17316 invalidParams := request.ErrInvalidParams{Context: "ReplicationConfiguration"}
17317 if s.Role == nil {
17318 invalidParams.Add(request.NewErrParamRequired("Role"))
17319 }
17320 if s.Rules == nil {
17321 invalidParams.Add(request.NewErrParamRequired("Rules"))
17322 }
17323 if s.Rules != nil {
17324 for i, v := range s.Rules {
17325 if v == nil {
17326 continue
17327 }
17328 if err := v.Validate(); err != nil {
17329 invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams))
17330 }
17331 }
17332 }
17333
17334 if invalidParams.Len() > 0 {
17335 return invalidParams
17336 }
17337 return nil
17338}
17339
17340// SetRole sets the Role field's value.
17341func (s *ReplicationConfiguration) SetRole(v string) *ReplicationConfiguration {
17342 s.Role = &v
17343 return s
17344}
17345
17346// SetRules sets the Rules field's value.
17347func (s *ReplicationConfiguration) SetRules(v []*ReplicationRule) *ReplicationConfiguration {
17348 s.Rules = v
17349 return s
17350}
17351
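// Editorial sketch: a single-rule replication configuration. The role ARN,
// destination bucket ARN, and prefix are placeholders; Destination is the
// SDK type defined elsewhere in this file.
//
//	cfg := (&ReplicationConfiguration{}).
//		SetRole("arn:aws:iam::123456789012:role/example-replication-role").
//		SetRules([]*ReplicationRule{
//			(&ReplicationRule{}).
//				SetPrefix("logs/").
//				SetStatus("Enabled").
//				SetDestination((&Destination{}).
//					SetBucket("arn:aws:s3:::example-destination-bucket")),
//		})
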
17352// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ReplicationRule
17353type ReplicationRule struct {
17354 _ struct{} `type:"structure"`
17355
17356 // Destination is a required field
17357 Destination *Destination `type:"structure" required:"true"`
17358
17359 // Unique identifier for the rule. The value cannot be longer than 255 characters.
17360 ID *string `type:"string"`
17361
17362 // Object key name prefix identifying one or more objects to which the rule applies.
17363 // Maximum prefix length can be up to 1,024 characters. Overlapping prefixes
17364 // are not supported.
17365 //
17366 // Prefix is a required field
17367 Prefix *string `type:"string" required:"true"`
17368
17369 // The rule is ignored if status is not Enabled.
17370 //
17371 // Status is a required field
17372 Status *string `type:"string" required:"true" enum:"ReplicationRuleStatus"`
17373}
17374
17375// String returns the string representation
17376func (s ReplicationRule) String() string {
17377 return awsutil.Prettify(s)
17378}
17379
17380// GoString returns the string representation
17381func (s ReplicationRule) GoString() string {
17382 return s.String()
17383}
17384
17385// Validate inspects the fields of the type to determine if they are valid.
17386func (s *ReplicationRule) Validate() error {
17387 invalidParams := request.ErrInvalidParams{Context: "ReplicationRule"}
17388 if s.Destination == nil {
17389 invalidParams.Add(request.NewErrParamRequired("Destination"))
17390 }
17391 if s.Prefix == nil {
17392 invalidParams.Add(request.NewErrParamRequired("Prefix"))
17393 }
17394 if s.Status == nil {
17395 invalidParams.Add(request.NewErrParamRequired("Status"))
17396 }
17397 if s.Destination != nil {
17398 if err := s.Destination.Validate(); err != nil {
17399 invalidParams.AddNested("Destination", err.(request.ErrInvalidParams))
17400 }
17401 }
17402
17403 if invalidParams.Len() > 0 {
17404 return invalidParams
17405 }
17406 return nil
17407}
17408
17409// SetDestination sets the Destination field's value.
17410func (s *ReplicationRule) SetDestination(v *Destination) *ReplicationRule {
17411 s.Destination = v
17412 return s
17413}
17414
17415// SetID sets the ID field's value.
17416func (s *ReplicationRule) SetID(v string) *ReplicationRule {
17417 s.ID = &v
17418 return s
17419}
17420
17421// SetPrefix sets the Prefix field's value.
17422func (s *ReplicationRule) SetPrefix(v string) *ReplicationRule {
17423 s.Prefix = &v
17424 return s
17425}
17426
17427// SetStatus sets the Status field's value.
17428func (s *ReplicationRule) SetStatus(v string) *ReplicationRule {
17429 s.Status = &v
17430 return s
17431}
17432
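// The generated setters above chain, which makes building nested request
// structures compact. A minimal illustrative sketch (hand-written, not part of
// the generated API; the role ARN, bucket ARN, prefix, and rule ID are
// placeholders, and ReplicationRuleStatusEnabled is the ReplicationRuleStatus
// enum value generated elsewhere in this file) that builds a one-rule
// replication configuration and runs the same Validate check the SDK applies
// before a request is sent:
func exampleReplicationConfiguration() error {
	cfg := (&ReplicationConfiguration{}).
		SetRole("arn:aws:iam::123456789012:role/replication-role").
		SetRules([]*ReplicationRule{
			(&ReplicationRule{}).
				SetID("replicate-logs").
				SetPrefix("logs/").
				SetStatus(ReplicationRuleStatusEnabled).
				SetDestination((&Destination{}).
					SetBucket("arn:aws:s3:::destination-bucket")),
		})
	// Validate reports any missing required fields.
	return cfg.Validate()
}
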
17433// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RequestPaymentConfiguration
17434type RequestPaymentConfiguration struct {
17435 _ struct{} `type:"structure"`
17436
17437 // Specifies who pays for the download and request fees.
17438 //
17439 // Payer is a required field
17440 Payer *string `type:"string" required:"true" enum:"Payer"`
17441}
17442
17443// String returns the string representation
17444func (s RequestPaymentConfiguration) String() string {
17445 return awsutil.Prettify(s)
17446}
17447
17448// GoString returns the string representation
17449func (s RequestPaymentConfiguration) GoString() string {
17450 return s.String()
17451}
17452
17453// Validate inspects the fields of the type to determine if they are valid.
17454func (s *RequestPaymentConfiguration) Validate() error {
17455 invalidParams := request.ErrInvalidParams{Context: "RequestPaymentConfiguration"}
17456 if s.Payer == nil {
17457 invalidParams.Add(request.NewErrParamRequired("Payer"))
17458 }
17459
17460 if invalidParams.Len() > 0 {
17461 return invalidParams
17462 }
17463 return nil
17464}
17465
17466// SetPayer sets the Payer field's value.
17467func (s *RequestPaymentConfiguration) SetPayer(v string) *RequestPaymentConfiguration {
17468 s.Payer = &v
17469 return s
17470}
17471
17472// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObjectRequest
17473type RestoreObjectInput struct {
17474 _ struct{} `type:"structure" payload:"RestoreRequest"`
17475
17476 // Bucket is a required field
17477 Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
17478
17479 // Key is a required field
17480 Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
17481
17482 // Confirms that the requester knows that they will be charged for the
17483 // request. Bucket owners need not specify this parameter in their requests.
17484 // Documentation on downloading objects from requester pays buckets can be found
17485 // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
17486 RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
17487
17488 RestoreRequest *RestoreRequest `locationName:"RestoreRequest" type:"structure"`
17489
17490 VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
17491}
17492
17493// String returns the string representation
17494func (s RestoreObjectInput) String() string {
17495 return awsutil.Prettify(s)
17496}
17497
17498// GoString returns the string representation
17499func (s RestoreObjectInput) GoString() string {
17500 return s.String()
17501}
17502
17503// Validate inspects the fields of the type to determine if they are valid.
17504func (s *RestoreObjectInput) Validate() error {
17505 invalidParams := request.ErrInvalidParams{Context: "RestoreObjectInput"}
17506 if s.Bucket == nil {
17507 invalidParams.Add(request.NewErrParamRequired("Bucket"))
17508 }
17509 if s.Key == nil {
17510 invalidParams.Add(request.NewErrParamRequired("Key"))
17511 }
17512 if s.Key != nil && len(*s.Key) < 1 {
17513 invalidParams.Add(request.NewErrParamMinLen("Key", 1))
17514 }
17515 if s.RestoreRequest != nil {
17516 if err := s.RestoreRequest.Validate(); err != nil {
17517 invalidParams.AddNested("RestoreRequest", err.(request.ErrInvalidParams))
17518 }
17519 }
17520
17521 if invalidParams.Len() > 0 {
17522 return invalidParams
17523 }
17524 return nil
17525}
17526
17527// SetBucket sets the Bucket field's value.
17528func (s *RestoreObjectInput) SetBucket(v string) *RestoreObjectInput {
17529 s.Bucket = &v
17530 return s
17531}
17532
17533// SetKey sets the Key field's value.
17534func (s *RestoreObjectInput) SetKey(v string) *RestoreObjectInput {
17535 s.Key = &v
17536 return s
17537}
17538
17539// SetRequestPayer sets the RequestPayer field's value.
17540func (s *RestoreObjectInput) SetRequestPayer(v string) *RestoreObjectInput {
17541 s.RequestPayer = &v
17542 return s
17543}
17544
17545// SetRestoreRequest sets the RestoreRequest field's value.
17546func (s *RestoreObjectInput) SetRestoreRequest(v *RestoreRequest) *RestoreObjectInput {
17547 s.RestoreRequest = v
17548 return s
17549}
17550
17551// SetVersionId sets the VersionId field's value.
17552func (s *RestoreObjectInput) SetVersionId(v string) *RestoreObjectInput {
17553 s.VersionId = &v
17554 return s
17555}
17556
17557// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObjectOutput
17558type RestoreObjectOutput struct {
17559 _ struct{} `type:"structure"`
17560
17561 // If present, indicates that the requester was successfully charged for the
17562 // request.
17563 RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
17564}
17565
17566// String returns the string representation
17567func (s RestoreObjectOutput) String() string {
17568 return awsutil.Prettify(s)
17569}
17570
17571// GoString returns the string representation
17572func (s RestoreObjectOutput) GoString() string {
17573 return s.String()
17574}
17575
17576// SetRequestCharged sets the RequestCharged field's value.
17577func (s *RestoreObjectOutput) SetRequestCharged(v string) *RestoreObjectOutput {
17578 s.RequestCharged = &v
17579 return s
17580}
17581
17582// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreRequest
17583type RestoreRequest struct {
17584 _ struct{} `type:"structure"`
17585
17586 // Lifetime of the active copy in days
17587 //
17588 // Days is a required field
17589 Days *int64 `type:"integer" required:"true"`
17590
17591 // Glacier-related parameters pertaining to this job.
17592 GlacierJobParameters *GlacierJobParameters `type:"structure"`
17593}
17594
17595// String returns the string representation
17596func (s RestoreRequest) String() string {
17597 return awsutil.Prettify(s)
17598}
17599
17600// GoString returns the string representation
17601func (s RestoreRequest) GoString() string {
17602 return s.String()
17603}
17604
17605// Validate inspects the fields of the type to determine if they are valid.
17606func (s *RestoreRequest) Validate() error {
17607 invalidParams := request.ErrInvalidParams{Context: "RestoreRequest"}
17608 if s.Days == nil {
17609 invalidParams.Add(request.NewErrParamRequired("Days"))
17610 }
17611 if s.GlacierJobParameters != nil {
17612 if err := s.GlacierJobParameters.Validate(); err != nil {
17613 invalidParams.AddNested("GlacierJobParameters", err.(request.ErrInvalidParams))
17614 }
17615 }
17616
17617 if invalidParams.Len() > 0 {
17618 return invalidParams
17619 }
17620 return nil
17621}
17622
17623// SetDays sets the Days field's value.
17624func (s *RestoreRequest) SetDays(v int64) *RestoreRequest {
17625 s.Days = &v
17626 return s
17627}
17628
17629// SetGlacierJobParameters sets the GlacierJobParameters field's value.
17630func (s *RestoreRequest) SetGlacierJobParameters(v *GlacierJobParameters) *RestoreRequest {
17631 s.GlacierJobParameters = v
17632 return s
17633}
17634
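// Illustrative sketch (hand-written, not part of the generated API): a
// restore request that keeps the restored copy active for ten days using the
// Standard retrieval tier. The bucket and key are placeholders; TierStandard
// is the Tier enum value generated elsewhere in this file.
func exampleRestoreObjectInput() error {
	input := (&RestoreObjectInput{}).
		SetBucket("my-bucket").
		SetKey("archive/report.csv").
		SetRestoreRequest((&RestoreRequest{}).
			SetDays(10).
			SetGlacierJobParameters((&GlacierJobParameters{}).
				SetTier(TierStandard)))
	// Validate enforces the required Bucket, Key, and Days fields checked above.
	return input.Validate()
}
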
17635// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RoutingRule
17636type RoutingRule struct {
17637 _ struct{} `type:"structure"`
17638
17639 // A container for describing a condition that must be met for the specified
17640 // redirect to apply. For example: (1) if a request is for pages in the /docs
17641 // folder, redirect to the /documents folder; (2) if a request results in an HTTP
17642 // 4xx error, redirect the request to another host where you might process the error.
17643 Condition *Condition `type:"structure"`
17644
17645 // Container for redirect information. You can redirect requests to another
17646 // host, to another page, or with another protocol. In the event of an error,
17647 // you can specify a different error code to return.
17648 //
17649 // Redirect is a required field
17650 Redirect *Redirect `type:"structure" required:"true"`
17651}
17652
17653// String returns the string representation
17654func (s RoutingRule) String() string {
17655 return awsutil.Prettify(s)
17656}
17657
17658// GoString returns the string representation
17659func (s RoutingRule) GoString() string {
17660 return s.String()
17661}
17662
17663// Validate inspects the fields of the type to determine if they are valid.
17664func (s *RoutingRule) Validate() error {
17665 invalidParams := request.ErrInvalidParams{Context: "RoutingRule"}
17666 if s.Redirect == nil {
17667 invalidParams.Add(request.NewErrParamRequired("Redirect"))
17668 }
17669
17670 if invalidParams.Len() > 0 {
17671 return invalidParams
17672 }
17673 return nil
17674}
17675
17676// SetCondition sets the Condition field's value.
17677func (s *RoutingRule) SetCondition(v *Condition) *RoutingRule {
17678 s.Condition = v
17679 return s
17680}
17681
17682// SetRedirect sets the Redirect field's value.
17683func (s *RoutingRule) SetRedirect(v *Redirect) *RoutingRule {
17684 s.Redirect = v
17685 return s
17686}
17687
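// Illustrative sketch (hand-written, not part of the generated API): a rule
// that redirects 404s under the docs/ prefix to a placeholder host, using the
// Condition and Redirect types generated earlier in this file.
func exampleRoutingRule() error {
	rule := (&RoutingRule{}).
		SetCondition((&Condition{}).
			SetKeyPrefixEquals("docs/").
			SetHttpErrorCodeReturnedEquals("404")).
		SetRedirect((&Redirect{}).
			SetHostName("docs.example.com").
			SetReplaceKeyPrefixWith("documents/"))
	// Only Redirect is required; Condition is optional.
	return rule.Validate()
}
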
17688// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Rule
17689type Rule struct {
17690 _ struct{} `type:"structure"`
17691
17692 // Specifies the number of days after the initiation of an incomplete multipart
17693 // upload that Lifecycle will wait before permanently removing all parts of the upload.
17694 AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `type:"structure"`
17695
17696 Expiration *LifecycleExpiration `type:"structure"`
17697
17698 // Unique identifier for the rule. The value cannot be longer than 255 characters.
17699 ID *string `type:"string"`
17700
17701 // Specifies when noncurrent object versions expire. Upon expiration, Amazon
17702 // S3 permanently deletes the noncurrent object versions. You set this lifecycle
17703 // configuration action on a bucket that has versioning enabled (or suspended)
17704 // to request that Amazon S3 delete noncurrent object versions at a specific
17705 // period in the object's lifetime.
17706 NoncurrentVersionExpiration *NoncurrentVersionExpiration `type:"structure"`
17707
17708 // Container for the transition rule that describes when noncurrent objects
17709 // transition to the STANDARD_IA or GLACIER storage class. If your bucket is
17710 // versioning-enabled (or versioning is suspended), you can set this action
17711 // to request that Amazon S3 transition noncurrent object versions to the STANDARD_IA
17712 // or GLACIER storage class at a specific period in the object's lifetime.
17713 NoncurrentVersionTransition *NoncurrentVersionTransition `type:"structure"`
17714
17715 // Prefix identifying one or more objects to which the rule applies.
17716 //
17717 // Prefix is a required field
17718 Prefix *string `type:"string" required:"true"`
17719
17720 // If 'Enabled', the rule is currently being applied. If 'Disabled', the rule
17721 // is not currently being applied.
17722 //
17723 // Status is a required field
17724 Status *string `type:"string" required:"true" enum:"ExpirationStatus"`
17725
17726 Transition *Transition `type:"structure"`
17727}
17728
17729// String returns the string representation
17730func (s Rule) String() string {
17731 return awsutil.Prettify(s)
17732}
17733
17734// GoString returns the string representation
17735func (s Rule) GoString() string {
17736 return s.String()
17737}
17738
17739// Validate inspects the fields of the type to determine if they are valid.
17740func (s *Rule) Validate() error {
17741 invalidParams := request.ErrInvalidParams{Context: "Rule"}
17742 if s.Prefix == nil {
17743 invalidParams.Add(request.NewErrParamRequired("Prefix"))
17744 }
17745 if s.Status == nil {
17746 invalidParams.Add(request.NewErrParamRequired("Status"))
17747 }
17748
17749 if invalidParams.Len() > 0 {
17750 return invalidParams
17751 }
17752 return nil
17753}
17754
17755// SetAbortIncompleteMultipartUpload sets the AbortIncompleteMultipartUpload field's value.
17756func (s *Rule) SetAbortIncompleteMultipartUpload(v *AbortIncompleteMultipartUpload) *Rule {
17757 s.AbortIncompleteMultipartUpload = v
17758 return s
17759}
17760
17761// SetExpiration sets the Expiration field's value.
17762func (s *Rule) SetExpiration(v *LifecycleExpiration) *Rule {
17763 s.Expiration = v
17764 return s
17765}
17766
17767// SetID sets the ID field's value.
17768func (s *Rule) SetID(v string) *Rule {
17769 s.ID = &v
17770 return s
17771}
17772
17773// SetNoncurrentVersionExpiration sets the NoncurrentVersionExpiration field's value.
17774func (s *Rule) SetNoncurrentVersionExpiration(v *NoncurrentVersionExpiration) *Rule {
17775 s.NoncurrentVersionExpiration = v
17776 return s
17777}
17778
17779// SetNoncurrentVersionTransition sets the NoncurrentVersionTransition field's value.
17780func (s *Rule) SetNoncurrentVersionTransition(v *NoncurrentVersionTransition) *Rule {
17781 s.NoncurrentVersionTransition = v
17782 return s
17783}
17784
17785// SetPrefix sets the Prefix field's value.
17786func (s *Rule) SetPrefix(v string) *Rule {
17787 s.Prefix = &v
17788 return s
17789}
17790
17791// SetStatus sets the Status field's value.
17792func (s *Rule) SetStatus(v string) *Rule {
17793 s.Status = &v
17794 return s
17795}
17796
17797// SetTransition sets the Transition field's value.
17798func (s *Rule) SetTransition(v *Transition) *Rule {
17799 s.Transition = v
17800 return s
17801}
17802
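// Illustrative sketch (hand-written, not part of the generated API): a
// lifecycle rule that moves objects under logs/ to GLACIER after 30 days and
// expires them after 365. The prefix, ID, and day counts are placeholders;
// the enum constants are generated elsewhere in this file.
func exampleLifecycleRule() error {
	rule := (&Rule{}).
		SetID("archive-logs").
		SetPrefix("logs/").
		SetStatus(ExpirationStatusEnabled).
		SetTransition((&Transition{}).
			SetDays(30).
			SetStorageClass(TransitionStorageClassGlacier)).
		SetExpiration((&LifecycleExpiration{}).SetDays(365))
	// Prefix and Status are the only required fields.
	return rule.Validate()
}
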
17803// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/StorageClassAnalysis
17804type StorageClassAnalysis struct {
17805 _ struct{} `type:"structure"`
17806
17807 // A container used to describe how data related to the storage class analysis
17808 // should be exported.
17809 DataExport *StorageClassAnalysisDataExport `type:"structure"`
17810}
17811
17812// String returns the string representation
17813func (s StorageClassAnalysis) String() string {
17814 return awsutil.Prettify(s)
17815}
17816
17817// GoString returns the string representation
17818func (s StorageClassAnalysis) GoString() string {
17819 return s.String()
17820}
17821
17822// Validate inspects the fields of the type to determine if they are valid.
17823func (s *StorageClassAnalysis) Validate() error {
17824 invalidParams := request.ErrInvalidParams{Context: "StorageClassAnalysis"}
17825 if s.DataExport != nil {
17826 if err := s.DataExport.Validate(); err != nil {
17827 invalidParams.AddNested("DataExport", err.(request.ErrInvalidParams))
17828 }
17829 }
17830
17831 if invalidParams.Len() > 0 {
17832 return invalidParams
17833 }
17834 return nil
17835}
17836
17837// SetDataExport sets the DataExport field's value.
17838func (s *StorageClassAnalysis) SetDataExport(v *StorageClassAnalysisDataExport) *StorageClassAnalysis {
17839 s.DataExport = v
17840 return s
17841}
17842
17843// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/StorageClassAnalysisDataExport
17844type StorageClassAnalysisDataExport struct {
17845 _ struct{} `type:"structure"`
17846
17847 // The place to store the data for an analysis.
17848 //
17849 // Destination is a required field
17850 Destination *AnalyticsExportDestination `type:"structure" required:"true"`
17851
17852 // The version of the output schema to use when exporting data. Must be V_1.
17853 //
17854 // OutputSchemaVersion is a required field
17855 OutputSchemaVersion *string `type:"string" required:"true" enum:"StorageClassAnalysisSchemaVersion"`
17856}
17857
17858// String returns the string representation
17859func (s StorageClassAnalysisDataExport) String() string {
17860 return awsutil.Prettify(s)
17861}
17862
17863// GoString returns the string representation
17864func (s StorageClassAnalysisDataExport) GoString() string {
17865 return s.String()
17866}
17867
17868// Validate inspects the fields of the type to determine if they are valid.
17869func (s *StorageClassAnalysisDataExport) Validate() error {
17870 invalidParams := request.ErrInvalidParams{Context: "StorageClassAnalysisDataExport"}
17871 if s.Destination == nil {
17872 invalidParams.Add(request.NewErrParamRequired("Destination"))
17873 }
17874 if s.OutputSchemaVersion == nil {
17875 invalidParams.Add(request.NewErrParamRequired("OutputSchemaVersion"))
17876 }
17877 if s.Destination != nil {
17878 if err := s.Destination.Validate(); err != nil {
17879 invalidParams.AddNested("Destination", err.(request.ErrInvalidParams))
17880 }
17881 }
17882
17883 if invalidParams.Len() > 0 {
17884 return invalidParams
17885 }
17886 return nil
17887}
17888
17889// SetDestination sets the Destination field's value.
17890func (s *StorageClassAnalysisDataExport) SetDestination(v *AnalyticsExportDestination) *StorageClassAnalysisDataExport {
17891 s.Destination = v
17892 return s
17893}
17894
17895// SetOutputSchemaVersion sets the OutputSchemaVersion field's value.
17896func (s *StorageClassAnalysisDataExport) SetOutputSchemaVersion(v string) *StorageClassAnalysisDataExport {
17897 s.OutputSchemaVersion = &v
17898 return s
17899}
17900
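// Illustrative sketch (hand-written, not part of the generated API): a
// storage class analysis that exports CSV results to a placeholder bucket
// ARN. AnalyticsExportDestination and AnalyticsS3BucketDestination are
// generated earlier in this file.
func exampleStorageClassAnalysis() error {
	analysis := (&StorageClassAnalysis{}).
		SetDataExport((&StorageClassAnalysisDataExport{}).
			SetOutputSchemaVersion(StorageClassAnalysisSchemaVersionV1).
			SetDestination((&AnalyticsExportDestination{}).
				SetS3BucketDestination((&AnalyticsS3BucketDestination{}).
					SetBucket("arn:aws:s3:::analysis-results").
					SetFormat(AnalyticsS3ExportFileFormatCsv))))
	// Validate recurses into the nested structures via AddNested, as above.
	return analysis.Validate()
}
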
17901// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Tag
17902type Tag struct {
17903 _ struct{} `type:"structure"`
17904
17905 // Name of the tag.
17906 //
17907 // Key is a required field
17908 Key *string `min:"1" type:"string" required:"true"`
17909
17910 // Value of the tag.
17911 //
17912 // Value is a required field
17913 Value *string `type:"string" required:"true"`
17914}
17915
17916// String returns the string representation
17917func (s Tag) String() string {
17918 return awsutil.Prettify(s)
17919}
17920
17921// GoString returns the string representation
17922func (s Tag) GoString() string {
17923 return s.String()
17924}
17925
17926// Validate inspects the fields of the type to determine if they are valid.
17927func (s *Tag) Validate() error {
17928 invalidParams := request.ErrInvalidParams{Context: "Tag"}
17929 if s.Key == nil {
17930 invalidParams.Add(request.NewErrParamRequired("Key"))
17931 }
17932 if s.Key != nil && len(*s.Key) < 1 {
17933 invalidParams.Add(request.NewErrParamMinLen("Key", 1))
17934 }
17935 if s.Value == nil {
17936 invalidParams.Add(request.NewErrParamRequired("Value"))
17937 }
17938
17939 if invalidParams.Len() > 0 {
17940 return invalidParams
17941 }
17942 return nil
17943}
17944
17945// SetKey sets the Key field's value.
17946func (s *Tag) SetKey(v string) *Tag {
17947 s.Key = &v
17948 return s
17949}
17950
17951// SetValue sets the Value field's value.
17952func (s *Tag) SetValue(v string) *Tag {
17953 s.Value = &v
17954 return s
17955}
17956
17957// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Tagging
17958type Tagging struct {
17959 _ struct{} `type:"structure"`
17960
17961 // TagSet is a required field
17962 TagSet []*Tag `locationNameList:"Tag" type:"list" required:"true"`
17963}
17964
17965// String returns the string representation
17966func (s Tagging) String() string {
17967 return awsutil.Prettify(s)
17968}
17969
17970// GoString returns the string representation
17971func (s Tagging) GoString() string {
17972 return s.String()
17973}
17974
17975// Validate inspects the fields of the type to determine if they are valid.
17976func (s *Tagging) Validate() error {
17977 invalidParams := request.ErrInvalidParams{Context: "Tagging"}
17978 if s.TagSet == nil {
17979 invalidParams.Add(request.NewErrParamRequired("TagSet"))
17980 }
17981 if s.TagSet != nil {
17982 for i, v := range s.TagSet {
17983 if v == nil {
17984 continue
17985 }
17986 if err := v.Validate(); err != nil {
17987 invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TagSet", i), err.(request.ErrInvalidParams))
17988 }
17989 }
17990 }
17991
17992 if invalidParams.Len() > 0 {
17993 return invalidParams
17994 }
17995 return nil
17996}
17997
17998// SetTagSet sets the TagSet field's value.
17999func (s *Tagging) SetTagSet(v []*Tag) *Tagging {
18000 s.TagSet = v
18001 return s
18002}
18003
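// Illustrative sketch (hand-written, not part of the generated API): a
// two-tag set with placeholder keys and values. Validate enforces the
// required fields and the one-character minimum on Key checked above.
func exampleTagging() error {
	tagging := (&Tagging{}).SetTagSet([]*Tag{
		(&Tag{}).SetKey("project").SetValue("search"),
		(&Tag{}).SetKey("owner").SetValue("data-team"),
	})
	return tagging.Validate()
}
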
18004// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/TargetGrant
18005type TargetGrant struct {
18006 _ struct{} `type:"structure"`
18007
18008 Grantee *Grantee `type:"structure"`
18009
18010 // Logging permissions assigned to the Grantee for the bucket.
18011 Permission *string `type:"string" enum:"BucketLogsPermission"`
18012}
18013
18014// String returns the string representation
18015func (s TargetGrant) String() string {
18016 return awsutil.Prettify(s)
18017}
18018
18019// GoString returns the string representation
18020func (s TargetGrant) GoString() string {
18021 return s.String()
18022}
18023
18024// Validate inspects the fields of the type to determine if they are valid.
18025func (s *TargetGrant) Validate() error {
18026 invalidParams := request.ErrInvalidParams{Context: "TargetGrant"}
18027 if s.Grantee != nil {
18028 if err := s.Grantee.Validate(); err != nil {
18029 invalidParams.AddNested("Grantee", err.(request.ErrInvalidParams))
18030 }
18031 }
18032
18033 if invalidParams.Len() > 0 {
18034 return invalidParams
18035 }
18036 return nil
18037}
18038
18039// SetGrantee sets the Grantee field's value.
18040func (s *TargetGrant) SetGrantee(v *Grantee) *TargetGrant {
18041 s.Grantee = v
18042 return s
18043}
18044
18045// SetPermission sets the Permission field's value.
18046func (s *TargetGrant) SetPermission(v string) *TargetGrant {
18047 s.Permission = &v
18048 return s
18049}
18050
18051// Container for specifying the configuration when you want Amazon S3 to publish
18052// events to an Amazon Simple Notification Service (Amazon SNS) topic.
18053// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/TopicConfiguration
18054type TopicConfiguration struct {
18055 _ struct{} `type:"structure"`
18056
18057 // Events is a required field
18058 Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"`
18059
18060 // Container for object key name filtering rules. For information about key
18061 // name filtering, go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
18062 Filter *NotificationConfigurationFilter `type:"structure"`
18063
18064 // Optional unique identifier for configurations in a notification configuration.
18065 // If you don't provide one, Amazon S3 will assign an ID.
18066 Id *string `type:"string"`
18067
18068 // Amazon SNS topic ARN to which Amazon S3 will publish a message when it detects
18069 // events of the specified type.
18070 //
18071 // TopicArn is a required field
18072 TopicArn *string `locationName:"Topic" type:"string" required:"true"`
18073}
18074
18075// String returns the string representation
18076func (s TopicConfiguration) String() string {
18077 return awsutil.Prettify(s)
18078}
18079
18080// GoString returns the string representation
18081func (s TopicConfiguration) GoString() string {
18082 return s.String()
18083}
18084
18085// Validate inspects the fields of the type to determine if they are valid.
18086func (s *TopicConfiguration) Validate() error {
18087 invalidParams := request.ErrInvalidParams{Context: "TopicConfiguration"}
18088 if s.Events == nil {
18089 invalidParams.Add(request.NewErrParamRequired("Events"))
18090 }
18091 if s.TopicArn == nil {
18092 invalidParams.Add(request.NewErrParamRequired("TopicArn"))
18093 }
18094
18095 if invalidParams.Len() > 0 {
18096 return invalidParams
18097 }
18098 return nil
18099}
18100
18101// SetEvents sets the Events field's value.
18102func (s *TopicConfiguration) SetEvents(v []*string) *TopicConfiguration {
18103 s.Events = v
18104 return s
18105}
18106
18107// SetFilter sets the Filter field's value.
18108func (s *TopicConfiguration) SetFilter(v *NotificationConfigurationFilter) *TopicConfiguration {
18109 s.Filter = v
18110 return s
18111}
18112
18113// SetId sets the Id field's value.
18114func (s *TopicConfiguration) SetId(v string) *TopicConfiguration {
18115 s.Id = &v
18116 return s
18117}
18118
18119// SetTopicArn sets the TopicArn field's value.
18120func (s *TopicConfiguration) SetTopicArn(v string) *TopicConfiguration {
18121 s.TopicArn = &v
18122 return s
18123}
18124
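// Illustrative sketch (hand-written, not part of the generated API): a
// configuration that publishes all object-created events to a placeholder
// SNS topic ARN. EventS3ObjectCreated is the Event enum value declared near
// the end of this file.
func exampleTopicConfiguration() error {
	event := EventS3ObjectCreated // copy the constant so it is addressable
	cfg := (&TopicConfiguration{}).
		SetEvents([]*string{&event}).
		SetTopicArn("arn:aws:sns:us-east-1:123456789012:s3-events")
	return cfg.Validate()
}
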
18125// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/TopicConfigurationDeprecated
18126type TopicConfigurationDeprecated struct {
18127 _ struct{} `type:"structure"`
18128
18129 // Bucket event for which to send notifications.
18130 Event *string `deprecated:"true" type:"string" enum:"Event"`
18131
18132 Events []*string `locationName:"Event" type:"list" flattened:"true"`
18133
18134 // Optional unique identifier for configurations in a notification configuration.
18135 // If you don't provide one, Amazon S3 will assign an ID.
18136 Id *string `type:"string"`
18137
18138 // Amazon SNS topic to which Amazon S3 will publish a message to report the
18139 // specified events for the bucket.
18140 Topic *string `type:"string"`
18141}
18142
18143// String returns the string representation
18144func (s TopicConfigurationDeprecated) String() string {
18145 return awsutil.Prettify(s)
18146}
18147
18148// GoString returns the string representation
18149func (s TopicConfigurationDeprecated) GoString() string {
18150 return s.String()
18151}
18152
18153// SetEvent sets the Event field's value.
18154func (s *TopicConfigurationDeprecated) SetEvent(v string) *TopicConfigurationDeprecated {
18155 s.Event = &v
18156 return s
18157}
18158
18159// SetEvents sets the Events field's value.
18160func (s *TopicConfigurationDeprecated) SetEvents(v []*string) *TopicConfigurationDeprecated {
18161 s.Events = v
18162 return s
18163}
18164
18165// SetId sets the Id field's value.
18166func (s *TopicConfigurationDeprecated) SetId(v string) *TopicConfigurationDeprecated {
18167 s.Id = &v
18168 return s
18169}
18170
18171// SetTopic sets the Topic field's value.
18172func (s *TopicConfigurationDeprecated) SetTopic(v string) *TopicConfigurationDeprecated {
18173 s.Topic = &v
18174 return s
18175}
18176
18177// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Transition
18178type Transition struct {
18179 _ struct{} `type:"structure"`
18180
18181 // Indicates the date on which the object is to be moved or deleted. The date
18182 // must be in ISO 8601 format, in UTC (GMT).
18183 Date *time.Time `type:"timestamp" timestampFormat:"iso8601"`
18184
18185 // Indicates the lifetime, in days, of the objects that are subject to the rule.
18186 // The value must be a non-zero positive integer.
18187 Days *int64 `type:"integer"`
18188
18189 // The class of storage used to store the object.
18190 StorageClass *string `type:"string" enum:"TransitionStorageClass"`
18191}
18192
18193// String returns the string representation
18194func (s Transition) String() string {
18195 return awsutil.Prettify(s)
18196}
18197
18198// GoString returns the string representation
18199func (s Transition) GoString() string {
18200 return s.String()
18201}
18202
18203// SetDate sets the Date field's value.
18204func (s *Transition) SetDate(v time.Time) *Transition {
18205 s.Date = &v
18206 return s
18207}
18208
18209// SetDays sets the Days field's value.
18210func (s *Transition) SetDays(v int64) *Transition {
18211 s.Days = &v
18212 return s
18213}
18214
18215// SetStorageClass sets the StorageClass field's value.
18216func (s *Transition) SetStorageClass(v string) *Transition {
18217 s.StorageClass = &v
18218 return s
18219}
18220
18221// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopyRequest
18222type UploadPartCopyInput struct {
18223 _ struct{} `type:"structure"`
18224
18225 // Bucket is a required field
18226 Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
18227
18228 // The name of the source bucket and key name of the source object, separated
18229 // by a slash (/). Must be URL-encoded.
18230 //
18231 // CopySource is a required field
18232 CopySource *string `location:"header" locationName:"x-amz-copy-source" type:"string" required:"true"`
18233
18234 // Copies the object if its entity tag (ETag) matches the specified tag.
18235 CopySourceIfMatch *string `location:"header" locationName:"x-amz-copy-source-if-match" type:"string"`
18236
18237 // Copies the object if it has been modified since the specified time.
18238 CopySourceIfModifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-modified-since" type:"timestamp" timestampFormat:"rfc822"`
18239
18240 // Copies the object if its entity tag (ETag) is different from the specified
18241 // ETag.
18242 CopySourceIfNoneMatch *string `location:"header" locationName:"x-amz-copy-source-if-none-match" type:"string"`
18243
18244 // Copies the object if it hasn't been modified since the specified time.
18245 CopySourceIfUnmodifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-unmodified-since" type:"timestamp" timestampFormat:"rfc822"`
18246
18247 // The range of bytes to copy from the source object. The range value must use
18248 // the form bytes=first-last, where the first and last are the zero-based byte
18249 // offsets to copy. For example, bytes=0-9 indicates that you want to copy the
18250 // first ten bytes of the source. You can copy a range only if the source object
18251 // is greater than 5 GB.
18252 CopySourceRange *string `location:"header" locationName:"x-amz-copy-source-range" type:"string"`
18253
18254 // Specifies the algorithm to use when decrypting the source object (e.g., AES256).
18255 CopySourceSSECustomerAlgorithm *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-algorithm" type:"string"`
18256
18257 // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt
18258 // the source object. The encryption key provided in this header must be one
18259 // that was used when the source object was created.
18260 CopySourceSSECustomerKey *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string"`
18261
18262 // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
18263 // Amazon S3 uses this header for a message integrity check to ensure the encryption
18264 // key was transmitted without error.
18265 CopySourceSSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key-MD5" type:"string"`
18266
18267 // Key is a required field
18268 Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
18269
18270 // Part number of the part being copied. This is a positive integer between 1 and
18271 // 10,000.
18272 //
18273 // PartNumber is a required field
18274 PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer" required:"true"`
18275
18276 // Confirms that the requester knows that they will be charged for the
18277 // request. Bucket owners need not specify this parameter in their requests.
18278 // Documentation on downloading objects from requester pays buckets can be found
18279 // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
18280 RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
18281
18282 // Specifies the algorithm to use when encrypting the object (e.g., AES256).
18283 SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
18284
18285 // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
18286 // data. This value is used to store the object and then it is discarded; Amazon
18287 // S3 does not store the encryption key. The key must be appropriate for use with
18288 // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
18289 // header. This must be the same encryption key specified in the initiate multipart
18290 // upload request.
18291 SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"`
18292
18293 // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
18294 // Amazon S3 uses this header for a message integrity check to ensure the encryption
18295 // key was transmitted without error.
18296 SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
18297
18298 // Upload ID identifying the multipart upload whose part is being copied.
18299 //
18300 // UploadId is a required field
18301 UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"`
18302}
18303
18304// String returns the string representation
18305func (s UploadPartCopyInput) String() string {
18306 return awsutil.Prettify(s)
18307}
18308
18309// GoString returns the string representation
18310func (s UploadPartCopyInput) GoString() string {
18311 return s.String()
18312}
18313
18314// Validate inspects the fields of the type to determine if they are valid.
18315func (s *UploadPartCopyInput) Validate() error {
18316 invalidParams := request.ErrInvalidParams{Context: "UploadPartCopyInput"}
18317 if s.Bucket == nil {
18318 invalidParams.Add(request.NewErrParamRequired("Bucket"))
18319 }
18320 if s.CopySource == nil {
18321 invalidParams.Add(request.NewErrParamRequired("CopySource"))
18322 }
18323 if s.Key == nil {
18324 invalidParams.Add(request.NewErrParamRequired("Key"))
18325 }
18326 if s.Key != nil && len(*s.Key) < 1 {
18327 invalidParams.Add(request.NewErrParamMinLen("Key", 1))
18328 }
18329 if s.PartNumber == nil {
18330 invalidParams.Add(request.NewErrParamRequired("PartNumber"))
18331 }
18332 if s.UploadId == nil {
18333 invalidParams.Add(request.NewErrParamRequired("UploadId"))
18334 }
18335
18336 if invalidParams.Len() > 0 {
18337 return invalidParams
18338 }
18339 return nil
18340}
18341
18342// SetBucket sets the Bucket field's value.
18343func (s *UploadPartCopyInput) SetBucket(v string) *UploadPartCopyInput {
18344 s.Bucket = &v
18345 return s
18346}
18347
18348// SetCopySource sets the CopySource field's value.
18349func (s *UploadPartCopyInput) SetCopySource(v string) *UploadPartCopyInput {
18350 s.CopySource = &v
18351 return s
18352}
18353
18354// SetCopySourceIfMatch sets the CopySourceIfMatch field's value.
18355func (s *UploadPartCopyInput) SetCopySourceIfMatch(v string) *UploadPartCopyInput {
18356 s.CopySourceIfMatch = &v
18357 return s
18358}
18359
18360// SetCopySourceIfModifiedSince sets the CopySourceIfModifiedSince field's value.
18361func (s *UploadPartCopyInput) SetCopySourceIfModifiedSince(v time.Time) *UploadPartCopyInput {
18362 s.CopySourceIfModifiedSince = &v
18363 return s
18364}
18365
18366// SetCopySourceIfNoneMatch sets the CopySourceIfNoneMatch field's value.
18367func (s *UploadPartCopyInput) SetCopySourceIfNoneMatch(v string) *UploadPartCopyInput {
18368 s.CopySourceIfNoneMatch = &v
18369 return s
18370}
18371
18372// SetCopySourceIfUnmodifiedSince sets the CopySourceIfUnmodifiedSince field's value.
18373func (s *UploadPartCopyInput) SetCopySourceIfUnmodifiedSince(v time.Time) *UploadPartCopyInput {
18374 s.CopySourceIfUnmodifiedSince = &v
18375 return s
18376}
18377
18378// SetCopySourceRange sets the CopySourceRange field's value.
18379func (s *UploadPartCopyInput) SetCopySourceRange(v string) *UploadPartCopyInput {
18380 s.CopySourceRange = &v
18381 return s
18382}
18383
18384// SetCopySourceSSECustomerAlgorithm sets the CopySourceSSECustomerAlgorithm field's value.
18385func (s *UploadPartCopyInput) SetCopySourceSSECustomerAlgorithm(v string) *UploadPartCopyInput {
18386 s.CopySourceSSECustomerAlgorithm = &v
18387 return s
18388}
18389
18390// SetCopySourceSSECustomerKey sets the CopySourceSSECustomerKey field's value.
18391func (s *UploadPartCopyInput) SetCopySourceSSECustomerKey(v string) *UploadPartCopyInput {
18392 s.CopySourceSSECustomerKey = &v
18393 return s
18394}
18395
18396// SetCopySourceSSECustomerKeyMD5 sets the CopySourceSSECustomerKeyMD5 field's value.
18397func (s *UploadPartCopyInput) SetCopySourceSSECustomerKeyMD5(v string) *UploadPartCopyInput {
18398 s.CopySourceSSECustomerKeyMD5 = &v
18399 return s
18400}
18401
18402// SetKey sets the Key field's value.
18403func (s *UploadPartCopyInput) SetKey(v string) *UploadPartCopyInput {
18404 s.Key = &v
18405 return s
18406}
18407
18408// SetPartNumber sets the PartNumber field's value.
18409func (s *UploadPartCopyInput) SetPartNumber(v int64) *UploadPartCopyInput {
18410 s.PartNumber = &v
18411 return s
18412}
18413
18414// SetRequestPayer sets the RequestPayer field's value.
18415func (s *UploadPartCopyInput) SetRequestPayer(v string) *UploadPartCopyInput {
18416 s.RequestPayer = &v
18417 return s
18418}
18419
18420// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value.
18421func (s *UploadPartCopyInput) SetSSECustomerAlgorithm(v string) *UploadPartCopyInput {
18422 s.SSECustomerAlgorithm = &v
18423 return s
18424}
18425
18426// SetSSECustomerKey sets the SSECustomerKey field's value.
18427func (s *UploadPartCopyInput) SetSSECustomerKey(v string) *UploadPartCopyInput {
18428 s.SSECustomerKey = &v
18429 return s
18430}
18431
18432// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
18433func (s *UploadPartCopyInput) SetSSECustomerKeyMD5(v string) *UploadPartCopyInput {
18434 s.SSECustomerKeyMD5 = &v
18435 return s
18436}
18437
18438// SetUploadId sets the UploadId field's value.
18439func (s *UploadPartCopyInput) SetUploadId(v string) *UploadPartCopyInput {
18440 s.UploadId = &v
18441 return s
18442}
18443
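// Illustrative sketch (hand-written, not part of the generated API): copying
// the first 5 MiB of an existing object as part 1 of a multipart upload. The
// bucket, key, copy source, and upload ID are placeholders. Per the field
// docs above, CopySource must be URL-encoded and CopySourceRange uses
// zero-based byte offsets.
func exampleUploadPartCopyInput() error {
	input := (&UploadPartCopyInput{}).
		SetBucket("destination-bucket").
		SetKey("large-object").
		SetUploadId("example-upload-id").
		SetPartNumber(1).
		SetCopySource("source-bucket/large-object").
		SetCopySourceRange("bytes=0-5242879")
	return input.Validate()
}
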
18444// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopyOutput
18445type UploadPartCopyOutput struct {
18446 _ struct{} `type:"structure" payload:"CopyPartResult"`
18447
18448 CopyPartResult *CopyPartResult `type:"structure"`
18449
18450 // The version of the source object that was copied, if you have enabled versioning
18451 // on the source bucket.
18452 CopySourceVersionId *string `location:"header" locationName:"x-amz-copy-source-version-id" type:"string"`
18453
18454 // If present, indicates that the requester was successfully charged for the
18455 // request.
18456 RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
18457
18458 // If server-side encryption with a customer-provided encryption key was requested,
18459 // the response will include this header confirming the encryption algorithm
18460 // used.
18461 SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
18462
18463 // If server-side encryption with a customer-provided encryption key was requested,
18464 // the response will include this header to provide round trip message integrity
18465 // verification of the customer-provided encryption key.
18466 SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
18467
18468 // If present, specifies the ID of the AWS Key Management Service (KMS) master
18469 // encryption key that was used for the object.
18470 SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
18471
18472 // The server-side encryption algorithm used when storing this object in S3
18473 // (e.g., AES256, aws:kms).
18474 ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
18475}
18476
18477// String returns the string representation
18478func (s UploadPartCopyOutput) String() string {
18479 return awsutil.Prettify(s)
18480}
18481
18482// GoString returns the string representation
18483func (s UploadPartCopyOutput) GoString() string {
18484 return s.String()
18485}
18486
18487// SetCopyPartResult sets the CopyPartResult field's value.
18488func (s *UploadPartCopyOutput) SetCopyPartResult(v *CopyPartResult) *UploadPartCopyOutput {
18489 s.CopyPartResult = v
18490 return s
18491}
18492
18493// SetCopySourceVersionId sets the CopySourceVersionId field's value.
18494func (s *UploadPartCopyOutput) SetCopySourceVersionId(v string) *UploadPartCopyOutput {
18495 s.CopySourceVersionId = &v
18496 return s
18497}
18498
18499// SetRequestCharged sets the RequestCharged field's value.
18500func (s *UploadPartCopyOutput) SetRequestCharged(v string) *UploadPartCopyOutput {
18501 s.RequestCharged = &v
18502 return s
18503}
18504
18505// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value.
18506func (s *UploadPartCopyOutput) SetSSECustomerAlgorithm(v string) *UploadPartCopyOutput {
18507 s.SSECustomerAlgorithm = &v
18508 return s
18509}
18510
18511// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
18512func (s *UploadPartCopyOutput) SetSSECustomerKeyMD5(v string) *UploadPartCopyOutput {
18513 s.SSECustomerKeyMD5 = &v
18514 return s
18515}
18516
18517// SetSSEKMSKeyId sets the SSEKMSKeyId field's value.
18518func (s *UploadPartCopyOutput) SetSSEKMSKeyId(v string) *UploadPartCopyOutput {
18519 s.SSEKMSKeyId = &v
18520 return s
18521}
18522
18523// SetServerSideEncryption sets the ServerSideEncryption field's value.
18524func (s *UploadPartCopyOutput) SetServerSideEncryption(v string) *UploadPartCopyOutput {
18525 s.ServerSideEncryption = &v
18526 return s
18527}
18528
18529// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartRequest
18530type UploadPartInput struct {
18531 _ struct{} `type:"structure" payload:"Body"`
18532
18533 // Object data.
18534 Body io.ReadSeeker `type:"blob"`
18535
18536 // Name of the bucket to which the multipart upload was initiated.
18537 //
18538 // Bucket is a required field
18539 Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
18540
18541 // Size of the body in bytes. This parameter is useful when the size of the
18542 // body cannot be determined automatically.
18543 ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"`
18544
18545 // Object key for which the multipart upload was initiated.
18546 //
18547 // Key is a required field
18548 Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
18549
18550 // Part number of the part being uploaded. This is a positive integer between 1
18551 // and 10,000.
18552 //
18553 // PartNumber is a required field
18554 PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer" required:"true"`
18555
18556 // Confirms that the requester knows that they will be charged for the
18557 // request. Bucket owners need not specify this parameter in their requests.
18558 // Documentation on downloading objects from requester pays buckets can be found
18559 // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
18560 RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
18561
18562 // Specifies the algorithm to use when encrypting the object (e.g., AES256).
18563 SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
18564
18565 // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
18566 // data. This value is used to store the object and then it is discarded; Amazon
18567 // S3 does not store the encryption key. The key must be appropriate for use with
18568 // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
18569 // header. This must be the same encryption key specified in the initiate multipart
18570 // upload request.
18571 SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"`
18572
18573 // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
18574 // Amazon S3 uses this header for a message integrity check to ensure the encryption
18575 // key was transmitted without error.
18576 SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
18577
18578 // Upload ID identifying the multipart upload whose part is being uploaded.
18579 //
18580 // UploadId is a required field
18581 UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"`
18582}
18583
18584// String returns the string representation
18585func (s UploadPartInput) String() string {
18586 return awsutil.Prettify(s)
18587}
18588
18589// GoString returns the string representation
18590func (s UploadPartInput) GoString() string {
18591 return s.String()
18592}
18593
18594// Validate inspects the fields of the type to determine if they are valid.
18595func (s *UploadPartInput) Validate() error {
18596 invalidParams := request.ErrInvalidParams{Context: "UploadPartInput"}
18597 if s.Bucket == nil {
18598 invalidParams.Add(request.NewErrParamRequired("Bucket"))
18599 }
18600 if s.Key == nil {
18601 invalidParams.Add(request.NewErrParamRequired("Key"))
18602 }
18603 if s.Key != nil && len(*s.Key) < 1 {
18604 invalidParams.Add(request.NewErrParamMinLen("Key", 1))
18605 }
18606 if s.PartNumber == nil {
18607 invalidParams.Add(request.NewErrParamRequired("PartNumber"))
18608 }
18609 if s.UploadId == nil {
18610 invalidParams.Add(request.NewErrParamRequired("UploadId"))
18611 }
18612
18613 if invalidParams.Len() > 0 {
18614 return invalidParams
18615 }
18616 return nil
18617}
18618
18619// SetBody sets the Body field's value.
18620func (s *UploadPartInput) SetBody(v io.ReadSeeker) *UploadPartInput {
18621 s.Body = v
18622 return s
18623}
18624
18625// SetBucket sets the Bucket field's value.
18626func (s *UploadPartInput) SetBucket(v string) *UploadPartInput {
18627 s.Bucket = &v
18628 return s
18629}
18630
18631// SetContentLength sets the ContentLength field's value.
18632func (s *UploadPartInput) SetContentLength(v int64) *UploadPartInput {
18633 s.ContentLength = &v
18634 return s
18635}
18636
18637// SetKey sets the Key field's value.
18638func (s *UploadPartInput) SetKey(v string) *UploadPartInput {
18639 s.Key = &v
18640 return s
18641}
18642
18643// SetPartNumber sets the PartNumber field's value.
18644func (s *UploadPartInput) SetPartNumber(v int64) *UploadPartInput {
18645 s.PartNumber = &v
18646 return s
18647}
18648
18649// SetRequestPayer sets the RequestPayer field's value.
18650func (s *UploadPartInput) SetRequestPayer(v string) *UploadPartInput {
18651 s.RequestPayer = &v
18652 return s
18653}
18654
18655// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value.
18656func (s *UploadPartInput) SetSSECustomerAlgorithm(v string) *UploadPartInput {
18657 s.SSECustomerAlgorithm = &v
18658 return s
18659}
18660
18661// SetSSECustomerKey sets the SSECustomerKey field's value.
18662func (s *UploadPartInput) SetSSECustomerKey(v string) *UploadPartInput {
18663 s.SSECustomerKey = &v
18664 return s
18665}
18666
18667// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
18668func (s *UploadPartInput) SetSSECustomerKeyMD5(v string) *UploadPartInput {
18669 s.SSECustomerKeyMD5 = &v
18670 return s
18671}
18672
18673// SetUploadId sets the UploadId field's value.
18674func (s *UploadPartInput) SetUploadId(v string) *UploadPartInput {
18675 s.UploadId = &v
18676 return s
18677}
18678
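// Illustrative sketch (hand-written, not part of the generated API):
// populating one part of a multipart upload. The caller supplies the part
// body as an io.ReadSeeker (for example, a *bytes.Reader); the bucket, key,
// and upload ID are placeholders.
func exampleUploadPartInput(body io.ReadSeeker) error {
	input := (&UploadPartInput{}).
		SetBucket("destination-bucket").
		SetKey("large-object").
		SetUploadId("example-upload-id").
		SetPartNumber(1).
		SetBody(body)
	return input.Validate()
}
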
18679// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartOutput
18680type UploadPartOutput struct {
18681 _ struct{} `type:"structure"`
18682
18683 // Entity tag for the uploaded object.
18684 ETag *string `location:"header" locationName:"ETag" type:"string"`
18685
18686 // If present, indicates that the requester was successfully charged for the
18687 // request.
18688 RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
18689
18690 // If server-side encryption with a customer-provided encryption key was requested,
18691 // the response will include this header confirming the encryption algorithm
18692 // used.
18693 SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
18694
18695 // If server-side encryption with a customer-provided encryption key was requested,
18696 // the response will include this header to provide round trip message integrity
18697 // verification of the customer-provided encryption key.
18698 SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
18699
18700 // If present, specifies the ID of the AWS Key Management Service (KMS) master
18701 // encryption key that was used for the object.
18702 SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
18703
18704 // The server-side encryption algorithm used when storing this object in S3
18705 // (e.g., AES256, aws:kms).
18706 ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
18707}
18708
18709// String returns the string representation
18710func (s UploadPartOutput) String() string {
18711 return awsutil.Prettify(s)
18712}
18713
18714// GoString returns the string representation
18715func (s UploadPartOutput) GoString() string {
18716 return s.String()
18717}
18718
18719// SetETag sets the ETag field's value.
18720func (s *UploadPartOutput) SetETag(v string) *UploadPartOutput {
18721 s.ETag = &v
18722 return s
18723}
18724
18725// SetRequestCharged sets the RequestCharged field's value.
18726func (s *UploadPartOutput) SetRequestCharged(v string) *UploadPartOutput {
18727 s.RequestCharged = &v
18728 return s
18729}
18730
18731// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value.
18732func (s *UploadPartOutput) SetSSECustomerAlgorithm(v string) *UploadPartOutput {
18733 s.SSECustomerAlgorithm = &v
18734 return s
18735}
18736
18737// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
18738func (s *UploadPartOutput) SetSSECustomerKeyMD5(v string) *UploadPartOutput {
18739 s.SSECustomerKeyMD5 = &v
18740 return s
18741}
18742
18743// SetSSEKMSKeyId sets the SSEKMSKeyId field's value.
18744func (s *UploadPartOutput) SetSSEKMSKeyId(v string) *UploadPartOutput {
18745 s.SSEKMSKeyId = &v
18746 return s
18747}
18748
18749// SetServerSideEncryption sets the ServerSideEncryption field's value.
18750func (s *UploadPartOutput) SetServerSideEncryption(v string) *UploadPartOutput {
18751 s.ServerSideEncryption = &v
18752 return s
18753}
18754
18755// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/VersioningConfiguration
18756type VersioningConfiguration struct {
18757 _ struct{} `type:"structure"`
18758
18759 // Specifies whether MFA delete is enabled in the bucket versioning configuration.
18760 // This element is only returned if the bucket has been configured with MFA
18761 // delete. If the bucket has never been so configured, this element is not returned.
18762 MFADelete *string `locationName:"MfaDelete" type:"string" enum:"MFADelete"`
18763
18764 // The versioning state of the bucket.
18765 Status *string `type:"string" enum:"BucketVersioningStatus"`
18766}
18767
18768// String returns the string representation
18769func (s VersioningConfiguration) String() string {
18770 return awsutil.Prettify(s)
18771}
18772
18773// GoString returns the string representation
18774func (s VersioningConfiguration) GoString() string {
18775 return s.String()
18776}
18777
18778// SetMFADelete sets the MFADelete field's value.
18779func (s *VersioningConfiguration) SetMFADelete(v string) *VersioningConfiguration {
18780 s.MFADelete = &v
18781 return s
18782}
18783
18784// SetStatus sets the Status field's value.
18785func (s *VersioningConfiguration) SetStatus(v string) *VersioningConfiguration {
18786 s.Status = &v
18787 return s
18788}
18789
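// Illustrative sketch (hand-written, not part of the generated API): enabling
// versioning on a placeholder bucket. PutBucketVersioningInput is generated
// earlier in this file; BucketVersioningStatusEnabled is declared in the
// constants below.
func exampleVersioningConfiguration() error {
	input := (&PutBucketVersioningInput{}).
		SetBucket("my-bucket").
		SetVersioningConfiguration((&VersioningConfiguration{}).
			SetStatus(BucketVersioningStatusEnabled))
	return input.Validate()
}
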
18790// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/WebsiteConfiguration
18791type WebsiteConfiguration struct {
18792 _ struct{} `type:"structure"`
18793
18794 ErrorDocument *ErrorDocument `type:"structure"`
18795
18796 IndexDocument *IndexDocument `type:"structure"`
18797
18798 RedirectAllRequestsTo *RedirectAllRequestsTo `type:"structure"`
18799
18800 RoutingRules []*RoutingRule `locationNameList:"RoutingRule" type:"list"`
18801}
18802
18803// String returns the string representation
18804func (s WebsiteConfiguration) String() string {
18805 return awsutil.Prettify(s)
18806}
18807
18808// GoString returns the string representation
18809func (s WebsiteConfiguration) GoString() string {
18810 return s.String()
18811}
18812
18813// Validate inspects the fields of the type to determine if they are valid.
18814func (s *WebsiteConfiguration) Validate() error {
18815 invalidParams := request.ErrInvalidParams{Context: "WebsiteConfiguration"}
18816 if s.ErrorDocument != nil {
18817 if err := s.ErrorDocument.Validate(); err != nil {
18818 invalidParams.AddNested("ErrorDocument", err.(request.ErrInvalidParams))
18819 }
18820 }
18821 if s.IndexDocument != nil {
18822 if err := s.IndexDocument.Validate(); err != nil {
18823 invalidParams.AddNested("IndexDocument", err.(request.ErrInvalidParams))
18824 }
18825 }
18826 if s.RedirectAllRequestsTo != nil {
18827 if err := s.RedirectAllRequestsTo.Validate(); err != nil {
18828 invalidParams.AddNested("RedirectAllRequestsTo", err.(request.ErrInvalidParams))
18829 }
18830 }
18831 if s.RoutingRules != nil {
18832 for i, v := range s.RoutingRules {
18833 if v == nil {
18834 continue
18835 }
18836 if err := v.Validate(); err != nil {
18837 invalidParams.AddNested(fmt.Sprintf("%s[%v]", "RoutingRules", i), err.(request.ErrInvalidParams))
18838 }
18839 }
18840 }
18841
18842 if invalidParams.Len() > 0 {
18843 return invalidParams
18844 }
18845 return nil
18846}
18847
18848// SetErrorDocument sets the ErrorDocument field's value.
18849func (s *WebsiteConfiguration) SetErrorDocument(v *ErrorDocument) *WebsiteConfiguration {
18850 s.ErrorDocument = v
18851 return s
18852}
18853
18854// SetIndexDocument sets the IndexDocument field's value.
18855func (s *WebsiteConfiguration) SetIndexDocument(v *IndexDocument) *WebsiteConfiguration {
18856 s.IndexDocument = v
18857 return s
18858}
18859
18860// SetRedirectAllRequestsTo sets the RedirectAllRequestsTo field's value.
18861func (s *WebsiteConfiguration) SetRedirectAllRequestsTo(v *RedirectAllRequestsTo) *WebsiteConfiguration {
18862 s.RedirectAllRequestsTo = v
18863 return s
18864}
18865
18866// SetRoutingRules sets the RoutingRules field's value.
18867func (s *WebsiteConfiguration) SetRoutingRules(v []*RoutingRule) *WebsiteConfiguration {
18868 s.RoutingRules = v
18869 return s
18870}
18871
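// Illustrative sketch (hand-written, not part of the generated API): a
// static-website configuration with an index document, an error document,
// and a single redirect rule. The suffix, key, and host name are placeholders.
func exampleWebsiteConfiguration() error {
	cfg := (&WebsiteConfiguration{}).
		SetIndexDocument((&IndexDocument{}).SetSuffix("index.html")).
		SetErrorDocument((&ErrorDocument{}).SetKey("error.html")).
		SetRoutingRules([]*RoutingRule{
			(&RoutingRule{}).SetRedirect((&Redirect{}).
				SetHostName("docs.example.com")),
		})
	return cfg.Validate()
}
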
18872const (
18873 // AnalyticsS3ExportFileFormatCsv is an AnalyticsS3ExportFileFormat enum value
18874 AnalyticsS3ExportFileFormatCsv = "CSV"
18875)
18876
18877const (
18878 // BucketAccelerateStatusEnabled is a BucketAccelerateStatus enum value
18879 BucketAccelerateStatusEnabled = "Enabled"
18880
18881 // BucketAccelerateStatusSuspended is a BucketAccelerateStatus enum value
18882 BucketAccelerateStatusSuspended = "Suspended"
18883)
18884
18885const (
18886 // BucketCannedACLPrivate is a BucketCannedACL enum value
18887 BucketCannedACLPrivate = "private"
18888
18889 // BucketCannedACLPublicRead is a BucketCannedACL enum value
18890 BucketCannedACLPublicRead = "public-read"
18891
18892 // BucketCannedACLPublicReadWrite is a BucketCannedACL enum value
18893 BucketCannedACLPublicReadWrite = "public-read-write"
18894
18895 // BucketCannedACLAuthenticatedRead is a BucketCannedACL enum value
18896 BucketCannedACLAuthenticatedRead = "authenticated-read"
18897)
18898
18899const (
18900 // BucketLocationConstraintEu is a BucketLocationConstraint enum value
18901 BucketLocationConstraintEu = "EU"
18902
18903 // BucketLocationConstraintEuWest1 is a BucketLocationConstraint enum value
18904 BucketLocationConstraintEuWest1 = "eu-west-1"
18905
18906 // BucketLocationConstraintUsWest1 is a BucketLocationConstraint enum value
18907 BucketLocationConstraintUsWest1 = "us-west-1"
18908
18909 // BucketLocationConstraintUsWest2 is a BucketLocationConstraint enum value
18910 BucketLocationConstraintUsWest2 = "us-west-2"
18911
18912 // BucketLocationConstraintApSouth1 is a BucketLocationConstraint enum value
18913 BucketLocationConstraintApSouth1 = "ap-south-1"
18914
18915 // BucketLocationConstraintApSoutheast1 is a BucketLocationConstraint enum value
18916 BucketLocationConstraintApSoutheast1 = "ap-southeast-1"
18917
18918 // BucketLocationConstraintApSoutheast2 is a BucketLocationConstraint enum value
18919 BucketLocationConstraintApSoutheast2 = "ap-southeast-2"
18920
18921 // BucketLocationConstraintApNortheast1 is a BucketLocationConstraint enum value
18922 BucketLocationConstraintApNortheast1 = "ap-northeast-1"
18923
18924 // BucketLocationConstraintSaEast1 is a BucketLocationConstraint enum value
18925 BucketLocationConstraintSaEast1 = "sa-east-1"
18926
18927 // BucketLocationConstraintCnNorth1 is a BucketLocationConstraint enum value
18928 BucketLocationConstraintCnNorth1 = "cn-north-1"
18929
18930 // BucketLocationConstraintEuCentral1 is a BucketLocationConstraint enum value
18931 BucketLocationConstraintEuCentral1 = "eu-central-1"
18932)
18933
18934const (
18935 // BucketLogsPermissionFullControl is a BucketLogsPermission enum value
18936 BucketLogsPermissionFullControl = "FULL_CONTROL"
18937
18938 // BucketLogsPermissionRead is a BucketLogsPermission enum value
18939 BucketLogsPermissionRead = "READ"
18940
18941 // BucketLogsPermissionWrite is a BucketLogsPermission enum value
18942 BucketLogsPermissionWrite = "WRITE"
18943)
18944
18945const (
18946 // BucketVersioningStatusEnabled is a BucketVersioningStatus enum value
18947 BucketVersioningStatusEnabled = "Enabled"
18948
18949 // BucketVersioningStatusSuspended is a BucketVersioningStatus enum value
18950 BucketVersioningStatusSuspended = "Suspended"
18951)
18952
18953// Requests Amazon S3 to encode the object keys in the response and specifies
18954// the encoding method to use. An object key may contain any Unicode character;
18955// however, an XML 1.0 parser cannot parse some characters, such as characters
18956// with an ASCII value from 0 to 10. For characters that are not supported in
18957// XML 1.0, you can add this parameter to request that Amazon S3 encode the
18958// keys in the response.
18959const (
18960 // EncodingTypeUrl is a EncodingType enum value
18961 EncodingTypeUrl = "url"
18962)
18963
18964// Bucket event for which to send notifications.
18965const (
18966 // EventS3ReducedRedundancyLostObject is a Event enum value
18967 EventS3ReducedRedundancyLostObject = "s3:ReducedRedundancyLostObject"
18968
18969 // EventS3ObjectCreated is a Event enum value
18970 EventS3ObjectCreated = "s3:ObjectCreated:*"
18971
18972 // EventS3ObjectCreatedPut is a Event enum value
18973 EventS3ObjectCreatedPut = "s3:ObjectCreated:Put"
18974
18975 // EventS3ObjectCreatedPost is a Event enum value
18976 EventS3ObjectCreatedPost = "s3:ObjectCreated:Post"
18977
18978 // EventS3ObjectCreatedCopy is a Event enum value
18979 EventS3ObjectCreatedCopy = "s3:ObjectCreated:Copy"
18980
18981 // EventS3ObjectCreatedCompleteMultipartUpload is a Event enum value
18982 EventS3ObjectCreatedCompleteMultipartUpload = "s3:ObjectCreated:CompleteMultipartUpload"
18983
18984 // EventS3ObjectRemoved is a Event enum value
18985 EventS3ObjectRemoved = "s3:ObjectRemoved:*"
18986
18987 // EventS3ObjectRemovedDelete is a Event enum value
18988 EventS3ObjectRemovedDelete = "s3:ObjectRemoved:Delete"
18989
18990 // EventS3ObjectRemovedDeleteMarkerCreated is a Event enum value
18991 EventS3ObjectRemovedDeleteMarkerCreated = "s3:ObjectRemoved:DeleteMarkerCreated"
18992)
18993
18994const (
18995 // ExpirationStatusEnabled is a ExpirationStatus enum value
18996 ExpirationStatusEnabled = "Enabled"
18997
18998 // ExpirationStatusDisabled is a ExpirationStatus enum value
18999 ExpirationStatusDisabled = "Disabled"
19000)
19001
19002const (
19003 // FilterRuleNamePrefix is a FilterRuleName enum value
19004 FilterRuleNamePrefix = "prefix"
19005
19006 // FilterRuleNameSuffix is a FilterRuleName enum value
19007 FilterRuleNameSuffix = "suffix"
19008)
19009
19010const (
19011 // InventoryFormatCsv is a InventoryFormat enum value
19012 InventoryFormatCsv = "CSV"
19013)
19014
19015const (
19016 // InventoryFrequencyDaily is a InventoryFrequency enum value
19017 InventoryFrequencyDaily = "Daily"
19018
19019 // InventoryFrequencyWeekly is a InventoryFrequency enum value
19020 InventoryFrequencyWeekly = "Weekly"
19021)
19022
19023const (
19024 // InventoryIncludedObjectVersionsAll is a InventoryIncludedObjectVersions enum value
19025 InventoryIncludedObjectVersionsAll = "All"
19026
19027 // InventoryIncludedObjectVersionsCurrent is a InventoryIncludedObjectVersions enum value
19028 InventoryIncludedObjectVersionsCurrent = "Current"
19029)
19030
19031const (
19032 // InventoryOptionalFieldSize is a InventoryOptionalField enum value
19033 InventoryOptionalFieldSize = "Size"
19034
19035 // InventoryOptionalFieldLastModifiedDate is a InventoryOptionalField enum value
19036 InventoryOptionalFieldLastModifiedDate = "LastModifiedDate"
19037
19038 // InventoryOptionalFieldStorageClass is a InventoryOptionalField enum value
19039 InventoryOptionalFieldStorageClass = "StorageClass"
19040
19041 // InventoryOptionalFieldEtag is a InventoryOptionalField enum value
19042 InventoryOptionalFieldEtag = "ETag"
19043
19044 // InventoryOptionalFieldIsMultipartUploaded is a InventoryOptionalField enum value
19045 InventoryOptionalFieldIsMultipartUploaded = "IsMultipartUploaded"
19046
19047 // InventoryOptionalFieldReplicationStatus is a InventoryOptionalField enum value
19048 InventoryOptionalFieldReplicationStatus = "ReplicationStatus"
19049)
19050
19051const (
19052 // MFADeleteEnabled is a MFADelete enum value
19053 MFADeleteEnabled = "Enabled"
19054
19055 // MFADeleteDisabled is a MFADelete enum value
19056 MFADeleteDisabled = "Disabled"
19057)
19058
19059const (
19060 // MFADeleteStatusEnabled is a MFADeleteStatus enum value
19061 MFADeleteStatusEnabled = "Enabled"
19062
19063 // MFADeleteStatusDisabled is a MFADeleteStatus enum value
19064 MFADeleteStatusDisabled = "Disabled"
19065)
19066
19067const (
19068 // MetadataDirectiveCopy is a MetadataDirective enum value
19069 MetadataDirectiveCopy = "COPY"
19070
19071 // MetadataDirectiveReplace is a MetadataDirective enum value
19072 MetadataDirectiveReplace = "REPLACE"
19073)
19074
19075const (
19076 // ObjectCannedACLPrivate is a ObjectCannedACL enum value
19077 ObjectCannedACLPrivate = "private"
19078
19079 // ObjectCannedACLPublicRead is a ObjectCannedACL enum value
19080 ObjectCannedACLPublicRead = "public-read"
19081
19082 // ObjectCannedACLPublicReadWrite is a ObjectCannedACL enum value
19083 ObjectCannedACLPublicReadWrite = "public-read-write"
19084
19085 // ObjectCannedACLAuthenticatedRead is a ObjectCannedACL enum value
19086 ObjectCannedACLAuthenticatedRead = "authenticated-read"
19087
19088 // ObjectCannedACLAwsExecRead is a ObjectCannedACL enum value
19089 ObjectCannedACLAwsExecRead = "aws-exec-read"
19090
19091 // ObjectCannedACLBucketOwnerRead is a ObjectCannedACL enum value
19092 ObjectCannedACLBucketOwnerRead = "bucket-owner-read"
19093
19094 // ObjectCannedACLBucketOwnerFullControl is a ObjectCannedACL enum value
19095 ObjectCannedACLBucketOwnerFullControl = "bucket-owner-full-control"
19096)
19097
19098const (
19099 // ObjectStorageClassStandard is a ObjectStorageClass enum value
19100 ObjectStorageClassStandard = "STANDARD"
19101
19102 // ObjectStorageClassReducedRedundancy is a ObjectStorageClass enum value
19103 ObjectStorageClassReducedRedundancy = "REDUCED_REDUNDANCY"
19104
19105 // ObjectStorageClassGlacier is a ObjectStorageClass enum value
19106 ObjectStorageClassGlacier = "GLACIER"
19107)
19108
19109const (
19110 // ObjectVersionStorageClassStandard is a ObjectVersionStorageClass enum value
19111 ObjectVersionStorageClassStandard = "STANDARD"
19112)
19113
19114const (
19115 // PayerRequester is a Payer enum value
19116 PayerRequester = "Requester"
19117
19118 // PayerBucketOwner is a Payer enum value
19119 PayerBucketOwner = "BucketOwner"
19120)
19121
19122const (
19123 // PermissionFullControl is a Permission enum value
19124 PermissionFullControl = "FULL_CONTROL"
19125
19126 // PermissionWrite is a Permission enum value
19127 PermissionWrite = "WRITE"
19128
19129 // PermissionWriteAcp is a Permission enum value
19130 PermissionWriteAcp = "WRITE_ACP"
19131
19132 // PermissionRead is a Permission enum value
19133 PermissionRead = "READ"
19134
19135 // PermissionReadAcp is a Permission enum value
19136 PermissionReadAcp = "READ_ACP"
19137)
19138
19139const (
19140 // ProtocolHttp is a Protocol enum value
19141 ProtocolHttp = "http"
19142
19143 // ProtocolHttps is a Protocol enum value
19144 ProtocolHttps = "https"
19145)
19146
19147const (
19148 // ReplicationRuleStatusEnabled is a ReplicationRuleStatus enum value
19149 ReplicationRuleStatusEnabled = "Enabled"
19150
19151 // ReplicationRuleStatusDisabled is a ReplicationRuleStatus enum value
19152 ReplicationRuleStatusDisabled = "Disabled"
19153)
19154
19155const (
19156 // ReplicationStatusComplete is a ReplicationStatus enum value
19157 ReplicationStatusComplete = "COMPLETE"
19158
19159 // ReplicationStatusPending is a ReplicationStatus enum value
19160 ReplicationStatusPending = "PENDING"
19161
19162 // ReplicationStatusFailed is a ReplicationStatus enum value
19163 ReplicationStatusFailed = "FAILED"
19164
19165 // ReplicationStatusReplica is a ReplicationStatus enum value
19166 ReplicationStatusReplica = "REPLICA"
19167)
19168
19169// If present, indicates that the requester was successfully charged for the
19170// request.
19171const (
19172 // RequestChargedRequester is a RequestCharged enum value
19173 RequestChargedRequester = "requester"
19174)
19175
19176// Confirms that the requester knows that she or he will be charged for the
19177// request. Bucket owners need not specify this parameter in their requests.
19178// Documentation on downloading objects from requester pays buckets can be found
19179// at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
19180const (
19181 // RequestPayerRequester is a RequestPayer enum value
19182 RequestPayerRequester = "requester"
19183)
19184
19185const (
19186 // ServerSideEncryptionAes256 is a ServerSideEncryption enum value
19187 ServerSideEncryptionAes256 = "AES256"
19188
19189 // ServerSideEncryptionAwsKms is a ServerSideEncryption enum value
19190 ServerSideEncryptionAwsKms = "aws:kms"
19191)
19192
19193const (
19194 // StorageClassStandard is a StorageClass enum value
19195 StorageClassStandard = "STANDARD"
19196
19197 // StorageClassReducedRedundancy is a StorageClass enum value
19198 StorageClassReducedRedundancy = "REDUCED_REDUNDANCY"
19199
19200 // StorageClassStandardIa is a StorageClass enum value
19201 StorageClassStandardIa = "STANDARD_IA"
19202)
19203
19204const (
19205 // StorageClassAnalysisSchemaVersionV1 is a StorageClassAnalysisSchemaVersion enum value
19206 StorageClassAnalysisSchemaVersionV1 = "V_1"
19207)
19208
19209const (
19210 // TaggingDirectiveCopy is a TaggingDirective enum value
19211 TaggingDirectiveCopy = "COPY"
19212
19213 // TaggingDirectiveReplace is a TaggingDirective enum value
19214 TaggingDirectiveReplace = "REPLACE"
19215)
19216
19217const (
19218 // TierStandard is a Tier enum value
19219 TierStandard = "Standard"
19220
19221 // TierBulk is a Tier enum value
19222 TierBulk = "Bulk"
19223
19224 // TierExpedited is a Tier enum value
19225 TierExpedited = "Expedited"
19226)
19227
19228const (
19229 // TransitionStorageClassGlacier is a TransitionStorageClass enum value
19230 TransitionStorageClassGlacier = "GLACIER"
19231
19232 // TransitionStorageClassStandardIa is a TransitionStorageClass enum value
19233 TransitionStorageClassStandardIa = "STANDARD_IA"
19234)
19235
19236const (
19237 // TypeCanonicalUser is a Type enum value
19238 TypeCanonicalUser = "CanonicalUser"
19239
19240 // TypeAmazonCustomerByEmail is a Type enum value
19241 TypeAmazonCustomerByEmail = "AmazonCustomerByEmail"
19242
19243 // TypeGroup is a Type enum value
19244 TypeGroup = "Group"
19245)
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go b/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go
new file mode 100644
index 0000000..bc68a46
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go
@@ -0,0 +1,106 @@
1package s3
2
3import (
4 "io/ioutil"
5 "regexp"
6
7 "github.com/aws/aws-sdk-go/aws"
8 "github.com/aws/aws-sdk-go/aws/awserr"
9 "github.com/aws/aws-sdk-go/aws/awsutil"
10 "github.com/aws/aws-sdk-go/aws/request"
11)
12
13var reBucketLocation = regexp.MustCompile(`>([^<>]+)<\/Location`)
14
15// NormalizeBucketLocation is a utility function which will update the
16// passed in value to always be a region ID. Generally this would be used
17// with GetBucketLocation API operation.
18//
19// Replaces empty string with "us-east-1", and "EU" with "eu-west-1".
20//
21// See http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html
22// for more information on the values that can be returned.
23func NormalizeBucketLocation(loc string) string {
24 switch loc {
25 case "":
26 loc = "us-east-1"
27 case "EU":
28 loc = "eu-west-1"
29 }
30
31 return loc
32}
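//
// A hedged usage sketch (illustrative only, not part of this file); the
// bucket name and svc client are placeholders:
//
//    out, err := svc.GetBucketLocation(&s3.GetBucketLocationInput{
//        Bucket: aws.String("my-bucket"),
//    })
//    if err == nil {
//        region := NormalizeBucketLocation(aws.StringValue(out.LocationConstraint))
//        fmt.Println("bucket region:", region)
//    }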
33
34// NormalizeBucketLocationHandler is a request handler which will update the
35// GetBucketLocation's result LocationConstraint value to always be a region ID.
36//
37// Replaces empty string with "us-east-1", and "EU" with "eu-west-1".
38//
39// See http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html
40// for more information on the values that can be returned.
41//
42// req, result := svc.GetBucketLocationRequest(&s3.GetBucketLocationInput{
43// Bucket: aws.String(bucket),
44// })
45// req.Handlers.Unmarshal.PushBackNamed(NormalizeBucketLocationHandler)
46// err := req.Send()
47var NormalizeBucketLocationHandler = request.NamedHandler{
48 Name: "awssdk.s3.NormalizeBucketLocation",
49 Fn: func(req *request.Request) {
50 if req.Error != nil {
51 return
52 }
53
54 out := req.Data.(*GetBucketLocationOutput)
55 loc := NormalizeBucketLocation(aws.StringValue(out.LocationConstraint))
56 out.LocationConstraint = aws.String(loc)
57 },
58}
59
60// WithNormalizeBucketLocation is a request option which will update the
61// GetBucketLocation's result LocationConstraint value to always be a region ID.
62//
63// Replaces empty string with "us-east-1", and "EU" with "eu-west-1".
64//
65// See http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html
66// for more information on the values that can be returned.
67//
68// result, err := svc.GetBucketLocationWithContext(ctx,
69// &s3.GetBucketLocationInput{
70// Bucket: aws.String(bucket),
71// },
72// s3.WithNormalizeBucketLocation,
73// )
74func WithNormalizeBucketLocation(r *request.Request) {
75 r.Handlers.Unmarshal.PushBackNamed(NormalizeBucketLocationHandler)
76}
77
78func buildGetBucketLocation(r *request.Request) {
79 if r.DataFilled() {
80 out := r.Data.(*GetBucketLocationOutput)
81 b, err := ioutil.ReadAll(r.HTTPResponse.Body)
82 if err != nil {
83 r.Error = awserr.New("SerializationError", "failed reading response body", err)
84 return
85 }
86
87 match := reBucketLocation.FindSubmatch(b)
88 if len(match) > 1 {
89 loc := string(match[1])
90 out.LocationConstraint = aws.String(loc)
91 }
92 }
93}
94
95func populateLocationConstraint(r *request.Request) {
96 if r.ParamsFilled() && aws.StringValue(r.Config.Region) != "us-east-1" {
97 in := r.Params.(*CreateBucketInput)
98 if in.CreateBucketConfiguration == nil {
99 r.Params = awsutil.CopyOf(r.Params)
100 in = r.Params.(*CreateBucketInput)
101 in.CreateBucketConfiguration = &CreateBucketConfiguration{
102 LocationConstraint: r.Config.Region,
103 }
104 }
105 }
106}
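
// A hedged illustration (not part of this file): with a client configured
// outside us-east-1, populateLocationConstraint fills in the
// CreateBucketConfiguration automatically, so the caller may omit it.
// sess is a placeholder *session.Session and the bucket name is made up.
//
//    svc := New(sess, aws.NewConfig().WithRegion("eu-west-1"))
//    _, err := svc.CreateBucket(&CreateBucketInput{
//        Bucket: aws.String("my-bucket"),
//    })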
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/content_md5.go b/vendor/github.com/aws/aws-sdk-go/service/s3/content_md5.go
new file mode 100644
index 0000000..9fc5df9
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/content_md5.go
@@ -0,0 +1,36 @@
1package s3
2
3import (
4 "crypto/md5"
5 "encoding/base64"
6 "io"
7
8 "github.com/aws/aws-sdk-go/aws/awserr"
9 "github.com/aws/aws-sdk-go/aws/request"
10)
11
12// contentMD5 computes and sets the HTTP Content-MD5 header for requests that
13// require it.
14func contentMD5(r *request.Request) {
15 h := md5.New()
16
17 // hash the body. seek back to the first position after reading to reset
18 // the body for transmission. copy errors may be assumed to be from the
19 // body.
20 _, err := io.Copy(h, r.Body)
21 if err != nil {
22 r.Error = awserr.New("ContentMD5", "failed to read body", err)
23 return
24 }
25 _, err = r.Body.Seek(0, 0)
26 if err != nil {
27 r.Error = awserr.New("ContentMD5", "failed to seek body", err)
28 return
29 }
30
31 // encode the md5 checksum in base64 and set the request header.
32 sum := h.Sum(nil)
33 sum64 := make([]byte, base64.StdEncoding.EncodedLen(len(sum)))
34 base64.StdEncoding.Encode(sum64, sum)
35 r.HTTPRequest.Header.Set("Content-MD5", string(sum64))
36}
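
// Equivalent standalone computation (a hedged sketch, not part of this
// file): Content-MD5 is the base64 encoding of the raw MD5 sum of the
// request body. bodyBytes is a placeholder []byte.
//
//    sum := md5.Sum(bodyBytes)
//    header := base64.StdEncoding.EncodeToString(sum[:])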
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go
new file mode 100644
index 0000000..8463347
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go
@@ -0,0 +1,46 @@
1package s3
2
3import (
4 "github.com/aws/aws-sdk-go/aws/client"
5 "github.com/aws/aws-sdk-go/aws/request"
6)
7
8func init() {
9 initClient = defaultInitClientFn
10 initRequest = defaultInitRequestFn
11}
12
13func defaultInitClientFn(c *client.Client) {
14 // Support building custom endpoints based on config
15 c.Handlers.Build.PushFront(updateEndpointForS3Config)
16
17 // Require SSL when using SSE keys
18 c.Handlers.Validate.PushBack(validateSSERequiresSSL)
19 c.Handlers.Build.PushBack(computeSSEKeys)
20
21 // S3 uses custom error unmarshaling logic
22 c.Handlers.UnmarshalError.Clear()
23 c.Handlers.UnmarshalError.PushBack(unmarshalError)
24}
25
26func defaultInitRequestFn(r *request.Request) {
27 // Add request handlers for specific platforms.
28 // e.g. 100-continue support for PUT requests using Go 1.6
29 platformRequestHandlers(r)
30
31 switch r.Operation.Name {
32 case opPutBucketCors, opPutBucketLifecycle, opPutBucketPolicy,
33 opPutBucketTagging, opDeleteObjects, opPutBucketLifecycleConfiguration,
34 opPutBucketReplication:
35 // These S3 operations require Content-MD5 to be set
36 r.Handlers.Build.PushBack(contentMD5)
37 case opGetBucketLocation:
38 // GetBucketLocation has custom parsing logic
39 r.Handlers.Unmarshal.PushFront(buildGetBucketLocation)
40 case opCreateBucket:
41 // Auto-populate LocationConstraint with current region
42 r.Handlers.Validate.PushFront(populateLocationConstraint)
43 case opCopyObject, opUploadPartCopy, opCompleteMultipartUpload:
44 r.Handlers.Unmarshal.PushFront(copyMultipartStatusOKUnmarshalError)
45 }
46}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/doc.go b/vendor/github.com/aws/aws-sdk-go/service/s3/doc.go
new file mode 100644
index 0000000..f045fd0
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/doc.go
@@ -0,0 +1,78 @@
1// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
2
3// Package s3 provides the client and types for making API
4// requests to Amazon Simple Storage Service.
5//
6// See https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01 for more information on this service.
7//
8// See s3 package documentation for more information.
9// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/
10//
11// Using the Client
12//
13// To use the client for Amazon Simple Storage Service you will first need
14// to create a new instance of it.
15//
16// When creating a client for an AWS service you'll first need to have a Session
17// already created. The Session provides configuration that can be shared
18// between multiple service clients. Additional configuration can be applied to
19// the Session and service's client when they are constructed. The aws package's
20// Config type contains several fields such as Region for the AWS Region the
21// client should make API requests to. The optional Config value can be provided
22// as the variadic argument for Sessions and client creation.
23//
24// Once the service's client is created you can use it to make API requests to the
25// AWS service. These clients are safe to use concurrently.
26//
27// // Create a session to share configuration, and load external configuration.
28// sess := session.Must(session.NewSession())
29//
30// // Create the service's client with the session.
31// svc := s3.New(sess)
32//
33// See the SDK's documentation for more information on how to use service clients.
34// https://docs.aws.amazon.com/sdk-for-go/api/
35//
36// See aws package's Config type for more information on configuration options.
37// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config
38//
39// See the Amazon Simple Storage Service client S3 for more
40// information on creating the service's client.
41// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/#New
42//
43// Once the client is created you can make an API request to the service.
44// Each API method takes an input parameter, and returns the service response
45// and an error.
46//
47// The API method will document which error codes the service can return for
48// the operation if the service models the API operation's errors. These
49// errors will also be available as const strings prefixed with "ErrCode".
50//
51// result, err := svc.AbortMultipartUpload(params)
52// if err != nil {
53// // Cast err to awserr.Error to handle specific error codes.
54// aerr, ok := err.(awserr.Error)
55// if ok && aerr.Code() == <error code to check for> {
56// // Specific error code handling
57// }
58// return err
59// }
60//
61// fmt.Println("AbortMultipartUpload result:")
62// fmt.Println(result)
63//
64// Using the Client with Context
65//
66// The service's client also provides methods to make API requests with a Context
67// value. This allows you to control the timeout and cancellation of pending
68// requests. These methods also take request Options as a variadic parameter to apply
69// additional configuration to the API request.
70//
71// ctx := context.Background()
72//
73// result, err := svc.AbortMultipartUploadWithContext(ctx, params)
74//
75// See the request package documentation for more information on using the Context pattern
76// with the SDK.
77// https://docs.aws.amazon.com/sdk-for-go/api/aws/request/
78package s3
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go b/vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go
new file mode 100644
index 0000000..b794a63
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go
@@ -0,0 +1,109 @@
1// Upload Managers
2//
3// The s3manager package's Uploader provides concurrent upload of content to S3
4// by taking advantage of S3's Multipart APIs. The Uploader also supports both
5// io.Reader for streaming uploads, and will also take advantage of io.ReadSeeker
6// for optimizations if the Body satisfies that type. Once the Uploader instance
7// is created you can call Upload concurrently from multiple goroutines safely.
8//
9// // The session the S3 Uploader will use
10// sess := session.Must(session.NewSession())
11//
12// // Create an uploader with the session and default options
13// uploader := s3manager.NewUploader(sess)
14//
15// f, err := os.Open(filename)
16// if err != nil {
17// return fmt.Errorf("failed to open file %q, %v", filename, err)
18// }
19//
20// // Upload the file to S3.
21// result, err := uploader.Upload(&s3manager.UploadInput{
22// Bucket: aws.String(myBucket),
23// Key: aws.String(myString),
24// Body: f,
25// })
26// if err != nil {
27// return fmt.Errorf("failed to upload file, %v", err)
28// }
29// fmt.Printf("file uploaded to, %s\n", aws.StringValue(result.Location))
30//
31// See the s3manager package's Uploader type documentation for more information.
32// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3manager/#Uploader
33//
34// Download Manager
35//
36// The s3manager package's Downloader provides concurrent downloading of Objects
37// from S3. The Downloader will write S3 Object content with an io.WriterAt.
38// Once the Downloader instance is created you can call Download concurrently from
39// multiple goroutines safely.
40//
41// // The session the S3 Downloader will use
42// sess := session.Must(session.NewSession())
43//
44// // Create a downloader with the session and default options
45// downloader := s3manager.NewDownloader(sess)
46//
47// // Create a file to write the S3 Object contents to.
48// f, err := os.Create(filename)
49// if err != nil {
50// return fmt.Errorf("failed to create file %q, %v", filename, err)
51// }
52//
53// // Write the contents of S3 Object to the file
54// n, err := downloader.Download(f, &s3.GetObjectInput{
55// Bucket: aws.String(myBucket),
56// Key: aws.String(myString),
57// })
58// if err != nil {
59// return fmt.Errorf("failed to download file, %v", err)
60// }
61// fmt.Printf("file downloaded, %d bytes\n", n)
62//
63// See the s3manager package's Downloader type documentation for more information.
64// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3manager/#Downloader
65//
66// Get Bucket Region
67//
68// GetBucketRegion will attempt to get the region for a bucket using a region
69// hint to determine which AWS partition to perform the query on. Use this utility
70// to determine the region a bucket is in.
71//
72// sess := session.Must(session.NewSession())
73//
74// bucket := "my-bucket"
75// region, err := s3manager.GetBucketRegion(ctx, sess, bucket, "us-west-2")
76// if err != nil {
77// if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "NotFound" {
78// fmt.Fprintf(os.Stderr, "unable to find bucket %s's region\n", bucket)
79// }
80// return err
81// }
82// fmt.Printf("Bucket %s is in %s region\n", bucket, region)
83//
84// See the s3manager package's GetBucketRegion function documentation for more information
85// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3manager/#GetBucketRegion
86//
87// S3 Crypto Client
88//
89// The s3crypto package provides the tools to upload and download encrypted
90// content from S3. The Encryption and Decryption clients can be used concurrently
91// once the client is created.
92//
93// sess := session.Must(session.NewSession())
94//
95// // Create the decryption client.
96// svc := s3crypto.NewDecryptionClient(sess)
97//
98// The object will be downloaded from S3 and decrypted locally. The metadata
99// about the object's encryption will instruct the decryption client how to
100// decrypt the content of the object. By default KMS is used for keys.
101// result, err := svc.GetObject(&s3.GetObjectInput{
102// Bucket: aws.String(myBucket),
103// Key: aws.String(myKey),
104// })
105//
106// See the s3crypto package documentation for more information.
107// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3crypto/
108//
109package s3
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go b/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go
new file mode 100644
index 0000000..931cb17
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go
@@ -0,0 +1,48 @@
1// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
2
3package s3
4
5const (
6
7 // ErrCodeBucketAlreadyExists for service response error code
8 // "BucketAlreadyExists".
9 //
10 // The requested bucket name is not available. The bucket namespace is shared
11 // by all users of the system. Please select a different name and try again.
12 ErrCodeBucketAlreadyExists = "BucketAlreadyExists"
13
14 // ErrCodeBucketAlreadyOwnedByYou for service response error code
15 // "BucketAlreadyOwnedByYou".
16 ErrCodeBucketAlreadyOwnedByYou = "BucketAlreadyOwnedByYou"
17
18 // ErrCodeNoSuchBucket for service response error code
19 // "NoSuchBucket".
20 //
21 // The specified bucket does not exist.
22 ErrCodeNoSuchBucket = "NoSuchBucket"
23
24 // ErrCodeNoSuchKey for service response error code
25 // "NoSuchKey".
26 //
27 // The specified key does not exist.
28 ErrCodeNoSuchKey = "NoSuchKey"
29
30 // ErrCodeNoSuchUpload for service response error code
31 // "NoSuchUpload".
32 //
33 // The specified multipart upload does not exist.
34 ErrCodeNoSuchUpload = "NoSuchUpload"
35
36 // ErrCodeObjectAlreadyInActiveTierError for service response error code
37 // "ObjectAlreadyInActiveTierError".
38 //
39 // This operation is not allowed against this storage tier.
40 ErrCodeObjectAlreadyInActiveTierError = "ObjectAlreadyInActiveTierError"
41
42 // ErrCodeObjectNotInActiveTierError for service response error code
43 // "ObjectNotInActiveTierError".
44 //
45 // The source object of the COPY operation is not in the active tier and is
46 // only stored in Amazon Glacier.
47 ErrCodeObjectNotInActiveTierError = "ObjectNotInActiveTierError"
48)
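
// A hedged usage sketch (not part of the generated file): these constants
// are typically matched against the code of an awserr.Error.
//
//    if aerr, ok := err.(awserr.Error); ok && aerr.Code() == ErrCodeNoSuchKey {
//        // The requested object does not exist; handle the missing key.
//    }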
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go b/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go
new file mode 100644
index 0000000..ec3ffe4
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go
@@ -0,0 +1,162 @@
1package s3
2
3import (
4 "fmt"
5 "net/url"
6 "regexp"
7 "strings"
8
9 "github.com/aws/aws-sdk-go/aws"
10 "github.com/aws/aws-sdk-go/aws/awserr"
11 "github.com/aws/aws-sdk-go/aws/awsutil"
12 "github.com/aws/aws-sdk-go/aws/request"
13)
14
15// operationBlacklist is a list of operation names that a request
16// handler should not be executed with.
17type operationBlacklist []string
18
19// Continue will return true if the Request's operation name is not
20// in the blacklist. False otherwise.
21func (b operationBlacklist) Continue(r *request.Request) bool {
22 for i := 0; i < len(b); i++ {
23 if b[i] == r.Operation.Name {
24 return false
25 }
26 }
27 return true
28}
29
30var accelerateOpBlacklist = operationBlacklist{
31 opListBuckets, opCreateBucket, opDeleteBucket,
32}
33
34// Request handler to automatically add the bucket name to the endpoint domain
35// if possible. This style of addressing is valid for all bucket names that are
36// DNS compatible and do not contain ".".
37func updateEndpointForS3Config(r *request.Request) {
38 forceHostStyle := aws.BoolValue(r.Config.S3ForcePathStyle)
39 accelerate := aws.BoolValue(r.Config.S3UseAccelerate)
40
41 if accelerate && accelerateOpBlacklist.Continue(r) {
42 if forceHostStyle {
43 if r.Config.Logger != nil {
44 r.Config.Logger.Log("ERROR: aws.Config.S3UseAccelerate is not compatible with aws.Config.S3ForcePathStyle, ignoring S3ForcePathStyle.")
45 }
46 }
47 updateEndpointForAccelerate(r)
48 } else if !forceHostStyle && r.Operation.Name != opGetBucketLocation {
49 updateEndpointForHostStyle(r)
50 }
51}
52
53func updateEndpointForHostStyle(r *request.Request) {
54 bucket, ok := bucketNameFromReqParams(r.Params)
55 if !ok {
56 // Ignore operation requests if the bucket name was not provided.
57 // If this is an input validation error the validation handler
58 // will report it.
59 return
60 }
61
62 if !hostCompatibleBucketName(r.HTTPRequest.URL, bucket) {
63 // bucket name must be valid to put into the host
64 return
65 }
66
67 moveBucketToHost(r.HTTPRequest.URL, bucket)
68}
69
70var (
71 accelElem = []byte("s3-accelerate.dualstack.")
72)
73
74func updateEndpointForAccelerate(r *request.Request) {
75 bucket, ok := bucketNameFromReqParams(r.Params)
76 if !ok {
77 // Ignore operation requests if the bucket name was not provided.
78 // If this is an input validation error the validation handler
79 // will report it.
80 return
81 }
82
83 if !hostCompatibleBucketName(r.HTTPRequest.URL, bucket) {
84 r.Error = awserr.New("InvalidParameterException",
85 fmt.Sprintf("bucket name %s is not compatible with S3 Accelerate", bucket),
86 nil)
87 return
88 }
89
90 parts := strings.Split(r.HTTPRequest.URL.Host, ".")
91 if len(parts) < 3 {
92 r.Error = awserr.New("InvalidParameterException",
93 fmt.Sprintf("unable to update endpoint host for S3 accelerate, hostname invalid, %s",
94 r.HTTPRequest.URL.Host), nil)
95 return
96 }
97
98 if parts[0] == "s3" || strings.HasPrefix(parts[0], "s3-") {
99 parts[0] = "s3-accelerate"
100 }
101 for i := 1; i+1 < len(parts); i++ {
102 if parts[i] == aws.StringValue(r.Config.Region) {
103 parts = append(parts[:i], parts[i+1:]...)
104 break
105 }
106 }
107
108 r.HTTPRequest.URL.Host = strings.Join(parts, ".")
109
110 moveBucketToHost(r.HTTPRequest.URL, bucket)
111}
112
113// Attempts to retrieve the bucket name from the request input parameters.
114// If no bucket is found, or the field is empty "", false will be returned.
115func bucketNameFromReqParams(params interface{}) (string, bool) {
116 b, _ := awsutil.ValuesAtPath(params, "Bucket")
117 if len(b) == 0 {
118 return "", false
119 }
120
121 if bucket, ok := b[0].(*string); ok {
122 if bucketStr := aws.StringValue(bucket); bucketStr != "" {
123 return bucketStr, true
124 }
125 }
126
127 return "", false
128}
129
130// hostCompatibleBucketName returns true if the request should
131// put the bucket in the host. This is false if S3ForcePathStyle is
132// explicitly set or if the bucket is not DNS compatible.
133func hostCompatibleBucketName(u *url.URL, bucket string) bool {
134 // Bucket might be DNS compatible but dots in the hostname will fail
135 // certificate validation, so do not use host-style.
136 if u.Scheme == "https" && strings.Contains(bucket, ".") {
137 return false
138 }
139
140 // if the bucket is DNS compatible
141 return dnsCompatibleBucketName(bucket)
142}
143
144var reDomain = regexp.MustCompile(`^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$`)
145var reIPAddress = regexp.MustCompile(`^(\d+\.){3}\d+$`)
146
147// dnsCompatibleBucketName returns true if the bucket name is DNS compatible.
148// Buckets created outside of the classic region MUST be DNS compatible.
149func dnsCompatibleBucketName(bucket string) bool {
150 return reDomain.MatchString(bucket) &&
151 !reIPAddress.MatchString(bucket) &&
152 !strings.Contains(bucket, "..")
153}
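
// Illustrative results (a hedged sketch, not part of this file):
//
//    dnsCompatibleBucketName("my-bucket")   // true: lowercase, DNS safe
//    dnsCompatibleBucketName("My_Bucket")   // false: uppercase and underscore
//    dnsCompatibleBucketName("192.168.0.1") // false: looks like an IP address
//    dnsCompatibleBucketName("a..b")        // false: consecutive dots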
154
155// moveBucketToHost moves the bucket name from the URI path to URL host.
156func moveBucketToHost(u *url.URL, bucket string) {
157 u.Host = bucket + "." + u.Host
158 u.Path = strings.Replace(u.Path, "/{Bucket}", "", -1)
159 if u.Path == "" {
160 u.Path = "/"
161 }
162}
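
// A hedged example of the rewrite (not part of this file):
//
//    u, _ := url.Parse("https://s3.us-west-2.amazonaws.com/{Bucket}")
//    moveBucketToHost(u, "my-bucket")
//    // u.String() == "https://my-bucket.s3.us-west-2.amazonaws.com/"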
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers.go b/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers.go
new file mode 100644
index 0000000..8e6f330
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers.go
@@ -0,0 +1,8 @@
1// +build !go1.6
2
3package s3
4
5import "github.com/aws/aws-sdk-go/aws/request"
6
7func platformRequestHandlers(r *request.Request) {
8}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers_go1.6.go b/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers_go1.6.go
new file mode 100644
index 0000000..14d05f7
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers_go1.6.go
@@ -0,0 +1,28 @@
1// +build go1.6
2
3package s3
4
5import (
6 "github.com/aws/aws-sdk-go/aws"
7 "github.com/aws/aws-sdk-go/aws/request"
8)
9
10func platformRequestHandlers(r *request.Request) {
11 if r.Operation.HTTPMethod == "PUT" {
12 // 100-Continue should only be used on put requests.
13 r.Handlers.Sign.PushBack(add100Continue)
14 }
15}
16
17func add100Continue(r *request.Request) {
18 if aws.BoolValue(r.Config.S3Disable100Continue) {
19 return
20 }
21 if r.HTTPRequest.ContentLength < 1024*1024*2 {
22 // Ignore requests smaller than 2MB. This helps prevent delaying
23 // requests unnecessarily.
24 return
25 }
26
27 r.HTTPRequest.Header.Set("Expect", "100-Continue")
28}
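
// A hedged configuration sketch (not part of this file): the Expect header
// can be disabled entirely through the client config. sess is a placeholder
// *session.Session.
//
//    svc := New(sess, aws.NewConfig().WithS3Disable100Continue(true))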
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/service.go b/vendor/github.com/aws/aws-sdk-go/service/s3/service.go
new file mode 100644
index 0000000..614e477
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/service.go
@@ -0,0 +1,93 @@
1// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
2
3package s3
4
5import (
6 "github.com/aws/aws-sdk-go/aws"
7 "github.com/aws/aws-sdk-go/aws/client"
8 "github.com/aws/aws-sdk-go/aws/client/metadata"
9 "github.com/aws/aws-sdk-go/aws/request"
10 "github.com/aws/aws-sdk-go/aws/signer/v4"
11 "github.com/aws/aws-sdk-go/private/protocol/restxml"
12)
13
14// S3 provides the API operation methods for making requests to
15// Amazon Simple Storage Service. See this package's package overview docs
16// for details on the service.
17//
18// S3 methods are safe to use concurrently. It is not safe to
19// mutate any of the struct's properties though.
20type S3 struct {
21 *client.Client
22}
23
24// Used for custom client initialization logic
25var initClient func(*client.Client)
26
27// Used for custom request initialization logic
28var initRequest func(*request.Request)
29
30// Service information constants
31const (
32 ServiceName = "s3" // Service endpoint prefix API calls are made to.
33 EndpointsID = ServiceName // Service ID for Regions and Endpoints metadata.
34)
35
36// New creates a new instance of the S3 client with a session.
37// If additional configuration is needed for the client instance use the optional
38// aws.Config parameter to add your extra config.
39//
40// Example:
41// // Create a S3 client from just a session.
42// svc := s3.New(mySession)
43//
44// // Create a S3 client with additional configuration
45// svc := s3.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
46func New(p client.ConfigProvider, cfgs ...*aws.Config) *S3 {
47 c := p.ClientConfig(EndpointsID, cfgs...)
48 return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName)
49}
50
51// newClient creates, initializes and returns a new service client instance.
52func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *S3 {
53 svc := &S3{
54 Client: client.New(
55 cfg,
56 metadata.ClientInfo{
57 ServiceName: ServiceName,
58 SigningName: signingName,
59 SigningRegion: signingRegion,
60 Endpoint: endpoint,
61 APIVersion: "2006-03-01",
62 },
63 handlers,
64 ),
65 }
66
67 // Handlers
68 svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
69 svc.Handlers.Build.PushBackNamed(restxml.BuildHandler)
70 svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler)
71 svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler)
72 svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler)
73
74 // Run custom client initialization if present
75 if initClient != nil {
76 initClient(svc.Client)
77 }
78
79 return svc
80}
81
82// newRequest creates a new request for a S3 operation and runs any
83// custom request initialization.
84func (c *S3) newRequest(op *request.Operation, params, data interface{}) *request.Request {
85 req := c.NewRequest(op, params, data)
86
87 // Run custom request initialization if present
88 if initRequest != nil {
89 initRequest(req)
90 }
91
92 return req
93}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go b/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go
new file mode 100644
index 0000000..268ea2f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go
@@ -0,0 +1,44 @@
1package s3
2
3import (
4 "crypto/md5"
5 "encoding/base64"
6
7 "github.com/aws/aws-sdk-go/aws/awserr"
8 "github.com/aws/aws-sdk-go/aws/awsutil"
9 "github.com/aws/aws-sdk-go/aws/request"
10)
11
12var errSSERequiresSSL = awserr.New("ConfigError", "cannot send SSE keys over HTTP.", nil)
13
14func validateSSERequiresSSL(r *request.Request) {
15 if r.HTTPRequest.URL.Scheme != "https" {
16 p, _ := awsutil.ValuesAtPath(r.Params, "SSECustomerKey||CopySourceSSECustomerKey")
17 if len(p) > 0 {
18 r.Error = errSSERequiresSSL
19 }
20 }
21}
22
23func computeSSEKeys(r *request.Request) {
24 headers := []string{
25 "x-amz-server-side-encryption-customer-key",
26 "x-amz-copy-source-server-side-encryption-customer-key",
27 }
28
29 for _, h := range headers {
30 md5h := h + "-md5"
31 if key := r.HTTPRequest.Header.Get(h); key != "" {
32 // Base64-encode the value
33 b64v := base64.StdEncoding.EncodeToString([]byte(key))
34 r.HTTPRequest.Header.Set(h, b64v)
35
36 // Add MD5 if it wasn't computed
37 if r.HTTPRequest.Header.Get(md5h) == "" {
38 sum := md5.Sum([]byte(key))
39 b64sum := base64.StdEncoding.EncodeToString(sum[:])
40 r.HTTPRequest.Header.Set(md5h, b64sum)
41 }
42 }
43 }
44}
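
// A hedged usage sketch (not part of this file): callers set the raw SSE-C
// key on the input over HTTPS, and the handlers above base64-encode it and
// add the matching MD5 header. The names, body, and rawKey are placeholders.
//
//    _, err := svc.PutObject(&PutObjectInput{
//        Bucket:               aws.String("my-bucket"),
//        Key:                  aws.String("my-object"),
//        Body:                 body, // an io.ReadSeeker
//        SSECustomerAlgorithm: aws.String("AES256"),
//        SSECustomerKey:       aws.String(string(rawKey)), // 32-byte key
//    })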
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go b/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go
new file mode 100644
index 0000000..5a78fd3
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go
@@ -0,0 +1,35 @@
1package s3
2
3import (
4 "bytes"
5 "io/ioutil"
6 "net/http"
7
8 "github.com/aws/aws-sdk-go/aws/awserr"
9 "github.com/aws/aws-sdk-go/aws/request"
10)
11
12func copyMultipartStatusOKUnmarshalError(r *request.Request) {
13 b, err := ioutil.ReadAll(r.HTTPResponse.Body)
14 if err != nil {
15 r.Error = awserr.New("SerializationError", "unable to read response body", err)
16 return
17 }
18 body := bytes.NewReader(b)
19 r.HTTPResponse.Body = ioutil.NopCloser(body)
20 defer body.Seek(0, 0)
21
22 if body.Len() == 0 {
23 // If there is no body don't attempt to parse the body.
24 return
25 }
26
27 unmarshalError(r)
28 if err, ok := r.Error.(awserr.Error); ok && err != nil {
29 if err.Code() == "SerializationError" {
30 r.Error = nil
31 return
32 }
33 r.HTTPResponse.StatusCode = http.StatusServiceUnavailable
34 }
35}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go
new file mode 100644
index 0000000..bcca862
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go
@@ -0,0 +1,103 @@
1package s3
2
3import (
4 "encoding/xml"
5 "fmt"
6 "io"
7 "io/ioutil"
8 "net/http"
9 "strings"
10
11 "github.com/aws/aws-sdk-go/aws"
12 "github.com/aws/aws-sdk-go/aws/awserr"
13 "github.com/aws/aws-sdk-go/aws/request"
14)
15
16type xmlErrorResponse struct {
17 XMLName xml.Name `xml:"Error"`
18 Code string `xml:"Code"`
19 Message string `xml:"Message"`
20}
21
22func unmarshalError(r *request.Request) {
23 defer r.HTTPResponse.Body.Close()
24 defer io.Copy(ioutil.Discard, r.HTTPResponse.Body)
25
26 hostID := r.HTTPResponse.Header.Get("X-Amz-Id-2")
27
28 // Bucket exists in a different region, and request needs
29 // to be made to the correct region.
30 if r.HTTPResponse.StatusCode == http.StatusMovedPermanently {
31 r.Error = requestFailure{
32 RequestFailure: awserr.NewRequestFailure(
33 awserr.New("BucketRegionError",
34 fmt.Sprintf("incorrect region, the bucket is not in '%s' region",
35 aws.StringValue(r.Config.Region)),
36 nil),
37 r.HTTPResponse.StatusCode,
38 r.RequestID,
39 ),
40 hostID: hostID,
41 }
42 return
43 }
44
45 var errCode, errMsg string
46
47 // Attempt to parse error from body if it is known
48 resp := &xmlErrorResponse{}
49 err := xml.NewDecoder(r.HTTPResponse.Body).Decode(resp)
50 if err != nil && err != io.EOF {
51 errCode = "SerializationError"
52 errMsg = "failed to decode S3 XML error response"
53 } else {
54 errCode = resp.Code
55 errMsg = resp.Message
56 err = nil
57 }
58
59 // Fallback to status code converted to message if still no error code
60 if len(errCode) == 0 {
61 statusText := http.StatusText(r.HTTPResponse.StatusCode)
62 errCode = strings.Replace(statusText, " ", "", -1)
63 errMsg = statusText
64 }
65
66 r.Error = requestFailure{
67 RequestFailure: awserr.NewRequestFailure(
68 awserr.New(errCode, errMsg, err),
69 r.HTTPResponse.StatusCode,
70 r.RequestID,
71 ),
72 hostID: hostID,
73 }
74}
75
76// A RequestFailure provides access to the S3 Request ID and Host ID values
77// returned from API operation errors. Getting the error as a string will
78// return the formatted error with the same information as awserr.RequestFailure,
79// while also adding the HostID value from the response.
80type RequestFailure interface {
81 awserr.RequestFailure
82
83 // HostID is the S3 Host ID needed for debugging and contacting support.
84 HostID() string
85}
86
87type requestFailure struct {
88 awserr.RequestFailure
89
90 hostID string
91}
92
93func (r requestFailure) Error() string {
94 extra := fmt.Sprintf("status code: %d, request id: %s, host id: %s",
95 r.StatusCode(), r.RequestID(), r.hostID)
96 return awserr.SprintError(r.Code(), r.Message(), extra, r.OrigErr())
97}
98func (r requestFailure) String() string {
99 return r.Error()
100}
101func (r requestFailure) HostID() string {
102 return r.hostID
103}
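
// A hedged usage sketch (not part of this file): recovering the Host ID
// from a failed call for an AWS support case.
//
//    if reqErr, ok := err.(RequestFailure); ok {
//        fmt.Println("request id:", reqErr.RequestID())
//        fmt.Println("host id:", reqErr.HostID())
//    }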
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go b/vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go
new file mode 100644
index 0000000..cccfa8c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go
@@ -0,0 +1,214 @@
1// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
2
3package s3
4
5import (
6 "time"
7
8 "github.com/aws/aws-sdk-go/aws"
9 "github.com/aws/aws-sdk-go/aws/request"
10)
11
12// WaitUntilBucketExists uses the Amazon S3 API operation
13// HeadBucket to wait for a condition to be met before returning.
14// If the condition is not met within the max attempt window an error will
15// be returned.
16func (c *S3) WaitUntilBucketExists(input *HeadBucketInput) error {
17 return c.WaitUntilBucketExistsWithContext(aws.BackgroundContext(), input)
18}
19
20// WaitUntilBucketExistsWithContext is an extended version of WaitUntilBucketExists,
21// with support for passing in a context and options to configure the
22// Waiter and the underlying request options.
23//
24// The context must be non-nil and will be used for request cancellation. If
25// the context is nil a panic will occur. In the future the SDK may create
26// sub-contexts for http.Requests. See https://golang.org/pkg/context/
27// for more information on using Contexts.
28func (c *S3) WaitUntilBucketExistsWithContext(ctx aws.Context, input *HeadBucketInput, opts ...request.WaiterOption) error {
29 w := request.Waiter{
30 Name: "WaitUntilBucketExists",
31 MaxAttempts: 20,
32 Delay: request.ConstantWaiterDelay(5 * time.Second),
33 Acceptors: []request.WaiterAcceptor{
34 {
35 State: request.SuccessWaiterState,
36 Matcher: request.StatusWaiterMatch,
37 Expected: 200,
38 },
39 {
40 State: request.SuccessWaiterState,
41 Matcher: request.StatusWaiterMatch,
42 Expected: 301,
43 },
44 {
45 State: request.SuccessWaiterState,
46 Matcher: request.StatusWaiterMatch,
47 Expected: 403,
48 },
49 {
50 State: request.RetryWaiterState,
51 Matcher: request.StatusWaiterMatch,
52 Expected: 404,
53 },
54 },
55 Logger: c.Config.Logger,
56 NewRequest: func(opts []request.Option) (*request.Request, error) {
57 var inCpy *HeadBucketInput
58 if input != nil {
59 tmp := *input
60 inCpy = &tmp
61 }
62 req, _ := c.HeadBucketRequest(inCpy)
63 req.SetContext(ctx)
64 req.ApplyOptions(opts...)
65 return req, nil
66 },
67 }
68 w.ApplyOptions(opts...)
69
70 return w.WaitWithContext(ctx)
71}
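
// A hedged usage sketch (not part of the generated file): bounding the wait
// with a context deadline. The bucket name and svc client are placeholders.
//
//    ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
//    defer cancel()
//    err := svc.WaitUntilBucketExistsWithContext(ctx, &HeadBucketInput{
//        Bucket: aws.String("my-bucket"),
//    })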
72
73// WaitUntilBucketNotExists uses the Amazon S3 API operation
74// HeadBucket to wait for a condition to be met before returning.
75// If the condition is not met within the max attempt window an error will
76// be returned.
77func (c *S3) WaitUntilBucketNotExists(input *HeadBucketInput) error {
78 return c.WaitUntilBucketNotExistsWithContext(aws.BackgroundContext(), input)
79}
80
81// WaitUntilBucketNotExistsWithContext is an extended version of WaitUntilBucketNotExists,
82// with support for passing in a context and options to configure the
83// Waiter and the underlying request options.
84//
85// The context must be non-nil and will be used for request cancellation. If
86// the context is nil a panic will occur. In the future the SDK may create
87// sub-contexts for http.Requests. See https://golang.org/pkg/context/
88// for more information on using Contexts.
89func (c *S3) WaitUntilBucketNotExistsWithContext(ctx aws.Context, input *HeadBucketInput, opts ...request.WaiterOption) error {
90 w := request.Waiter{
91 Name: "WaitUntilBucketNotExists",
92 MaxAttempts: 20,
93 Delay: request.ConstantWaiterDelay(5 * time.Second),
94 Acceptors: []request.WaiterAcceptor{
95 {
96 State: request.SuccessWaiterState,
97 Matcher: request.StatusWaiterMatch,
98 Expected: 404,
99 },
100 },
101 Logger: c.Config.Logger,
102 NewRequest: func(opts []request.Option) (*request.Request, error) {
103 var inCpy *HeadBucketInput
104 if input != nil {
105 tmp := *input
106 inCpy = &tmp
107 }
108 req, _ := c.HeadBucketRequest(inCpy)
109 req.SetContext(ctx)
110 req.ApplyOptions(opts...)
111 return req, nil
112 },
113 }
114 w.ApplyOptions(opts...)
115
116 return w.WaitWithContext(ctx)
117}
118
119// WaitUntilObjectExists uses the Amazon S3 API operation
120// HeadObject to wait for a condition to be met before returning.
121// If the condition is not met within the max attempt window an error will
122// be returned.
123func (c *S3) WaitUntilObjectExists(input *HeadObjectInput) error {
124 return c.WaitUntilObjectExistsWithContext(aws.BackgroundContext(), input)
125}
126
127// WaitUntilObjectExistsWithContext is an extended version of WaitUntilObjectExists,
128// with support for passing in a context and options to configure the
129// Waiter and the underlying request options.
130//
131// The context must be non-nil and will be used for request cancellation. If
132// the context is nil a panic will occur. In the future the SDK may create
133// sub-contexts for http.Requests. See https://golang.org/pkg/context/
134// for more information on using Contexts.
135func (c *S3) WaitUntilObjectExistsWithContext(ctx aws.Context, input *HeadObjectInput, opts ...request.WaiterOption) error {
136 w := request.Waiter{
137 Name: "WaitUntilObjectExists",
138 MaxAttempts: 20,
139 Delay: request.ConstantWaiterDelay(5 * time.Second),
140 Acceptors: []request.WaiterAcceptor{
141 {
142 State: request.SuccessWaiterState,
143 Matcher: request.StatusWaiterMatch,
144 Expected: 200,
145 },
146 {
147 State: request.RetryWaiterState,
148 Matcher: request.StatusWaiterMatch,
149 Expected: 404,
150 },
151 },
152 Logger: c.Config.Logger,
153 NewRequest: func(opts []request.Option) (*request.Request, error) {
154 var inCpy *HeadObjectInput
155 if input != nil {
156 tmp := *input
157 inCpy = &tmp
158 }
159 req, _ := c.HeadObjectRequest(inCpy)
160 req.SetContext(ctx)
161 req.ApplyOptions(opts...)
162 return req, nil
163 },
164 }
165 w.ApplyOptions(opts...)
166
167 return w.WaitWithContext(ctx)
168}
169
170// WaitUntilObjectNotExists uses the Amazon S3 API operation
171// HeadObject to wait for a condition to be met before returning.
172// If the condition is not met within the max attempt window an error will
173// be returned.
174func (c *S3) WaitUntilObjectNotExists(input *HeadObjectInput) error {
175 return c.WaitUntilObjectNotExistsWithContext(aws.BackgroundContext(), input)
176}
177
178// WaitUntilObjectNotExistsWithContext is an extended version of WaitUntilObjectNotExists,
179// with support for passing in a context and options to configure the
180// Waiter and the underlying request options.
181//
182// The context must be non-nil and will be used for request cancellation. If
183// the context is nil a panic will occur. In the future the SDK may create
184// sub-contexts for http.Requests. See https://golang.org/pkg/context/
185// for more information on using Contexts.
186func (c *S3) WaitUntilObjectNotExistsWithContext(ctx aws.Context, input *HeadObjectInput, opts ...request.WaiterOption) error {
187 w := request.Waiter{
188 Name: "WaitUntilObjectNotExists",
189 MaxAttempts: 20,
190 Delay: request.ConstantWaiterDelay(5 * time.Second),
191 Acceptors: []request.WaiterAcceptor{
192 {
193 State: request.SuccessWaiterState,
194 Matcher: request.StatusWaiterMatch,
195 Expected: 404,
196 },
197 },
198 Logger: c.Config.Logger,
199 NewRequest: func(opts []request.Option) (*request.Request, error) {
200 var inCpy *HeadObjectInput
201 if input != nil {
202 tmp := *input
203 inCpy = &tmp
204 }
205 req, _ := c.HeadObjectRequest(inCpy)
206 req.SetContext(ctx)
207 req.ApplyOptions(opts...)
208 return req, nil
209 },
210 }
211 w.ApplyOptions(opts...)
212
213 return w.WaitWithContext(ctx)
214}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go
new file mode 100644
index 0000000..2de6528
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go
@@ -0,0 +1,2365 @@
1// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
2
3package sts
4
5import (
6 "time"
7
8 "github.com/aws/aws-sdk-go/aws"
9 "github.com/aws/aws-sdk-go/aws/awsutil"
10 "github.com/aws/aws-sdk-go/aws/request"
11)
12
13const opAssumeRole = "AssumeRole"
14
15// AssumeRoleRequest generates a "aws/request.Request" representing the
16// client's request for the AssumeRole operation. The "output" return
17// value can be used to capture response data after the request's "Send" method
18// is called.
19//
20// See AssumeRole for usage and error information.
21//
22// Create a request object using this method when you want to inject custom
23// logic into the request's lifecycle using a custom handler, or if you want to
24// access properties on the request object before or after sending the request. If
25// you just want the service response, call the AssumeRole method directly
26// instead.
27//
28// Note: You must call the "Send" method on the returned request object in order
29// to execute the request.
30//
31// // Example sending a request using the AssumeRoleRequest method.
32// req, resp := client.AssumeRoleRequest(params)
33//
34// err := req.Send()
35// if err == nil { // resp is now filled
36// fmt.Println(resp)
37// }
38//
39// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole
40func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, output *AssumeRoleOutput) {
41 op := &request.Operation{
42 Name: opAssumeRole,
43 HTTPMethod: "POST",
44 HTTPPath: "/",
45 }
46
47 if input == nil {
48 input = &AssumeRoleInput{}
49 }
50
51 output = &AssumeRoleOutput{}
52 req = c.newRequest(op, input, output)
53 return
54}
55
56// AssumeRole API operation for AWS Security Token Service.
57//
58// Returns a set of temporary security credentials (consisting of an access
59// key ID, a secret access key, and a security token) that you can use to access
60// AWS resources that you might not normally have access to. Typically, you
61// use AssumeRole for cross-account access or federation. For a comparison of
62// AssumeRole with the other APIs that produce temporary credentials, see Requesting
63// Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
64// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
65// in the IAM User Guide.
66//
67// Important: You cannot call AssumeRole by using AWS root account credentials;
68// access is denied. You must use credentials for an IAM user or an IAM role
69// to call AssumeRole.
70//
71// For cross-account access, imagine that you own multiple accounts and need
72// to access resources in each account. You could create long-term credentials
73// in each account to access those resources. However, managing all those credentials
74// and remembering which one can access which account can be time consuming.
75// Instead, you can create one set of long-term credentials in one account and
76// then use temporary security credentials to access all the other accounts
77// by assuming roles in those accounts. For more information about roles, see
78// IAM Roles (Delegation and Federation) (http://docs.aws.amazon.com/IAM/latest/UserGuide/roles-toplevel.html)
79// in the IAM User Guide.
80//
81// For federation, you can, for example, grant single sign-on access to the
82// AWS Management Console. If you already have an identity and authentication
83// system in your corporate network, you don't have to recreate user identities
84// in AWS in order to grant those user identities access to AWS. Instead, after
85// a user has been authenticated, you call AssumeRole (and specify the role
86// with the appropriate permissions) to get temporary security credentials for
87// that user. With those temporary security credentials, you construct a sign-in
88// URL that users can use to access the console. For more information, see Common
89// Scenarios for Temporary Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html#sts-introduction)
90// in the IAM User Guide.
91//
92// The temporary security credentials are valid for the duration that you specified
93// when calling AssumeRole, which can be from 900 seconds (15 minutes) to a
94// maximum of 3600 seconds (1 hour). The default is 1 hour.
95//
96// The temporary security credentials created by AssumeRole can be used to make
97// API calls to any AWS service with the following exception: you cannot call
98// the STS service's GetFederationToken or GetSessionToken APIs.
99//
100// Optionally, you can pass an IAM access policy to this operation. If you choose
101// not to pass a policy, the temporary security credentials that are returned
102// by the operation have the permissions that are defined in the access policy
103// of the role that is being assumed. If you pass a policy to this operation,
104// the temporary security credentials that are returned by the operation have
105// the permissions that are allowed by both the access policy of the role that
106// is being assumed, and the policy that you pass. This gives you a way to further
107// restrict the permissions for the resulting temporary security credentials.
108// You cannot use the passed policy to grant permissions that are in excess
109// of those allowed by the access policy of the role that is being assumed.
110// For more information, see Permissions for AssumeRole, AssumeRoleWithSAML,
111// and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html)
112// in the IAM User Guide.
113//
114// To assume a role, your AWS account must be trusted by the role. The trust
115// relationship is defined in the role's trust policy when the role is created.
116// That trust policy states which accounts are allowed to delegate access to
117// this account's role.
118//
119// The user who wants to access the role must also have permissions delegated
120// from the role's administrator. If the user is in a different account than
121// the role, then the user's administrator must attach a policy that allows
122// the user to call AssumeRole on the ARN of the role in the other account.
123// If the user is in the same account as the role, then you can either attach
124// a policy to the user (identical to the previous different account user),
// or you can add the user as a principal directly in the role's trust policy.
//
// Using MFA with AssumeRole
//
// You can optionally include multi-factor authentication (MFA) information
// when you call AssumeRole. This is useful for cross-account scenarios in which
// you want to make sure that the user who is assuming the role has been authenticated
// using an AWS MFA device. In that scenario, the trust policy of the role being
// assumed includes a condition that tests for MFA authentication; if the caller
// does not include valid MFA information, the request to assume the role is
// denied. The condition in a trust policy that tests for MFA authentication
// might look like the following example.
//
//    "Condition": {"Bool": {"aws:MultiFactorAuthPresent": true}}
//
// For more information, see Configuring MFA-Protected API Access (http://docs.aws.amazon.com/IAM/latest/UserGuide/MFAProtectedAPI.html)
// in the IAM User Guide.
//
// To use MFA with AssumeRole, you pass values for the SerialNumber and TokenCode
// parameters. The SerialNumber value identifies the user's hardware or virtual
// MFA device. The TokenCode is the time-based one-time password (TOTP) that
// the MFA device produces.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Security Token Service's
// API operation AssumeRole for usage and error information.
//
// Returned Error Codes:
//   * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument"
//   The request was rejected because the policy document was malformed. The error
//   message describes the specific error.
//
//   * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge"
//   The request was rejected because the policy document was too large. The error
//   message describes how big the policy document is, in packed form, as a percentage
//   of what the API allows.
//
//   * ErrCodeRegionDisabledException "RegionDisabledException"
//   STS is not activated in the requested region for the account that is being
//   asked to generate credentials. The account administrator must use the IAM
//   console to activate STS in that region. For more information, see Activating
//   and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
//   in the IAM User Guide.
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole
func (c *STS) AssumeRole(input *AssumeRoleInput) (*AssumeRoleOutput, error) {
	req, out := c.AssumeRoleRequest(input)
	return out, req.Send()
}
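
// exampleAssumeRoleWithMFA is an editorial sketch, not part of the generated
// API surface: it shows one way a caller might invoke AssumeRole with MFA, as
// described in the doc comment above. The role ARN, session name, and MFA
// device ARN below are hypothetical placeholders; real callers should also use
// out.Credentials before they expire.
func exampleAssumeRoleWithMFA(svc *STS) (*AssumeRoleOutput, error) {
	return svc.AssumeRole(&AssumeRoleInput{
		RoleArn:         aws.String("arn:aws:iam::123456789012:role/example"), // hypothetical role ARN
		RoleSessionName: aws.String("example-session"),
		SerialNumber:    aws.String("arn:aws:iam::123456789012:mfa/example"), // hypothetical MFA device
		TokenCode:       aws.String("123456"),                                // current TOTP from the device
	})
}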

// AssumeRoleWithContext is the same as AssumeRole with the addition of
// the ability to pass a context and additional request options.
//
// See AssumeRole for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *STS) AssumeRoleWithContext(ctx aws.Context, input *AssumeRoleInput, opts ...request.Option) (*AssumeRoleOutput, error) {
	req, out := c.AssumeRoleRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}
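
// exampleAssumeRoleWithContext is an editorial sketch, not generated code: it
// shows the context-aware variant described above. aws.BackgroundContext is
// part of this SDK; on Go 1.7+ a caller could instead pass a context.Context
// carrying a deadline so the request is cancelled when the deadline passes.
func exampleAssumeRoleWithContext(svc *STS, input *AssumeRoleInput) (*AssumeRoleOutput, error) {
	ctx := aws.BackgroundContext() // replace with a deadline-bound context in real code
	return svc.AssumeRoleWithContext(ctx, input)
}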

const opAssumeRoleWithSAML = "AssumeRoleWithSAML"

// AssumeRoleWithSAMLRequest generates a "aws/request.Request" representing the
// client's request for the AssumeRoleWithSAML operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// See AssumeRoleWithSAML for usage and error information.
//
// Creating a request object using this method should be used when you want to inject
// custom logic into the request's lifecycle using a custom handler, or if you want to
// access properties on the request object before or after sending the request. If
// you just want the service response, call the AssumeRoleWithSAML method directly
// instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
//    // Example sending a request using the AssumeRoleWithSAMLRequest method.
//    req, resp := client.AssumeRoleWithSAMLRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML
func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *request.Request, output *AssumeRoleWithSAMLOutput) {
	op := &request.Operation{
		Name:       opAssumeRoleWithSAML,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &AssumeRoleWithSAMLInput{}
	}

	output = &AssumeRoleWithSAMLOutput{}
	req = c.newRequest(op, input, output)
	return
}

// AssumeRoleWithSAML API operation for AWS Security Token Service.
//
// Returns a set of temporary security credentials for users who have been authenticated
// via a SAML authentication response. This operation provides a mechanism for
// tying an enterprise identity store or directory to role-based AWS access
// without user-specific credentials or configuration. For a comparison of AssumeRoleWithSAML
// with the other APIs that produce temporary credentials, see Requesting Temporary
// Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
// in the IAM User Guide.
//
// The temporary security credentials returned by this operation consist of
// an access key ID, a secret access key, and a security token. Applications
// can use these temporary security credentials to sign calls to AWS services.
//
// The temporary security credentials are valid for the duration that you specified
// when calling AssumeRoleWithSAML, or until the time specified in the SAML authentication
// response's SessionNotOnOrAfter value, whichever is shorter. The duration
// can be from 900 seconds (15 minutes) to a maximum of 3600 seconds (1 hour).
// The default is 1 hour.
//
// The temporary security credentials created by AssumeRoleWithSAML can be used
// to make API calls to any AWS service with the following exception: you cannot
// call the STS service's GetFederationToken or GetSessionToken APIs.
//
// Optionally, you can pass an IAM access policy to this operation. If you choose
// not to pass a policy, the temporary security credentials that are returned
// by the operation have the permissions that are defined in the access policy
// of the role that is being assumed. If you pass a policy to this operation,
// the temporary security credentials that are returned by the operation have
// the permissions that are allowed by the intersection of both the access policy
// of the role that is being assumed, and the policy that you pass. This means
// that both policies must grant the permission for the action to be allowed.
// This gives you a way to further restrict the permissions for the resulting
// temporary security credentials. You cannot use the passed policy to grant
// permissions that are in excess of those allowed by the access policy of the
// role that is being assumed. For more information, see Permissions for AssumeRole,
// AssumeRoleWithSAML, and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html)
// in the IAM User Guide.
//
// Before your application can call AssumeRoleWithSAML, you must configure your
// SAML identity provider (IdP) to issue the claims required by AWS. Additionally,
// you must use AWS Identity and Access Management (IAM) to create a SAML provider
// entity in your AWS account that represents your identity provider, and create
// an IAM role that specifies this SAML provider in its trust policy.
//
// Calling AssumeRoleWithSAML does not require the use of AWS security credentials.
// The identity of the caller is validated by using keys in the metadata document
// that is uploaded for the SAML provider entity for your identity provider.
//
// Calling AssumeRoleWithSAML can result in an entry in your AWS CloudTrail
// logs. The entry includes the value in the NameID element of the SAML assertion.
// We recommend that you use a NameIDType that is not associated with any personally
// identifiable information (PII). For example, you could instead use the Persistent
// Identifier (urn:oasis:names:tc:SAML:2.0:nameid-format:persistent).
//
// For more information, see the following resources:
//
//    * About SAML 2.0-based Federation (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html)
//    in the IAM User Guide.
//
//    * Creating SAML Identity Providers (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml.html)
//    in the IAM User Guide.
//
//    * Configuring a Relying Party and Claims (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml_relying-party.html)
//    in the IAM User Guide.
//
//    * Creating a Role for SAML 2.0 Federation (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-idp_saml.html)
//    in the IAM User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Security Token Service's
// API operation AssumeRoleWithSAML for usage and error information.
//
// Returned Error Codes:
//   * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument"
//   The request was rejected because the policy document was malformed. The error
//   message describes the specific error.
//
//   * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge"
//   The request was rejected because the policy document was too large. The error
//   message describes how big the policy document is, in packed form, as a percentage
//   of what the API allows.
//
//   * ErrCodeIDPRejectedClaimException "IDPRejectedClaim"
//   The identity provider (IdP) reported that authentication failed. This might
//   be because the claim is invalid.
//
//   If this error is returned for the AssumeRoleWithWebIdentity operation, it
//   can also mean that the claim has expired or has been explicitly revoked.
//
//   * ErrCodeInvalidIdentityTokenException "InvalidIdentityToken"
//   The web identity token that was passed could not be validated by AWS. Get
//   a new identity token from the identity provider and then retry the request.
//
//   * ErrCodeExpiredTokenException "ExpiredTokenException"
//   The web identity token that was passed is expired or is not valid. Get a
//   new identity token from the identity provider and then retry the request.
//
//   * ErrCodeRegionDisabledException "RegionDisabledException"
//   STS is not activated in the requested region for the account that is being
//   asked to generate credentials. The account administrator must use the IAM
//   console to activate STS in that region. For more information, see Activating
//   and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
//   in the IAM User Guide.
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML
func (c *STS) AssumeRoleWithSAML(input *AssumeRoleWithSAMLInput) (*AssumeRoleWithSAMLOutput, error) {
	req, out := c.AssumeRoleWithSAMLRequest(input)
	return out, req.Send()
}
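
// exampleAssumeRoleWithSAML is an editorial sketch, not generated code: it
// shows calling AssumeRoleWithSAML with a base64-encoded SAML assertion from
// the IdP. The two ARNs below are hypothetical placeholders; the principal ARN
// identifies the SAML provider entity created in IAM, as the doc comment above
// describes.
func exampleAssumeRoleWithSAML(svc *STS, samlAssertion string) (*AssumeRoleWithSAMLOutput, error) {
	return svc.AssumeRoleWithSAML(&AssumeRoleWithSAMLInput{
		PrincipalArn:  aws.String("arn:aws:iam::123456789012:saml-provider/example"), // hypothetical
		RoleArn:       aws.String("arn:aws:iam::123456789012:role/example"),          // hypothetical
		SAMLAssertion: aws.String(samlAssertion), // base64-encoded SAML response from the IdP
	})
}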

// AssumeRoleWithSAMLWithContext is the same as AssumeRoleWithSAML with the addition of
// the ability to pass a context and additional request options.
//
// See AssumeRoleWithSAML for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *STS) AssumeRoleWithSAMLWithContext(ctx aws.Context, input *AssumeRoleWithSAMLInput, opts ...request.Option) (*AssumeRoleWithSAMLOutput, error) {
	req, out := c.AssumeRoleWithSAMLRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}

const opAssumeRoleWithWebIdentity = "AssumeRoleWithWebIdentity"

// AssumeRoleWithWebIdentityRequest generates a "aws/request.Request" representing the
// client's request for the AssumeRoleWithWebIdentity operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// See AssumeRoleWithWebIdentity for usage and error information.
//
// Creating a request object using this method should be used when you want to inject
// custom logic into the request's lifecycle using a custom handler, or if you want to
// access properties on the request object before or after sending the request. If
// you just want the service response, call the AssumeRoleWithWebIdentity method directly
// instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
//    // Example sending a request using the AssumeRoleWithWebIdentityRequest method.
//    req, resp := client.AssumeRoleWithWebIdentityRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity
func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityInput) (req *request.Request, output *AssumeRoleWithWebIdentityOutput) {
	op := &request.Operation{
		Name:       opAssumeRoleWithWebIdentity,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &AssumeRoleWithWebIdentityInput{}
	}

	output = &AssumeRoleWithWebIdentityOutput{}
	req = c.newRequest(op, input, output)
	return
}

// AssumeRoleWithWebIdentity API operation for AWS Security Token Service.
//
// Returns a set of temporary security credentials for users who have been authenticated
// in a mobile or web application with a web identity provider, such as Amazon
// Cognito, Login with Amazon, Facebook, Google, or any OpenID Connect-compatible
// identity provider.
//
// For mobile applications, we recommend that you use Amazon Cognito. You can
// use Amazon Cognito with the AWS SDK for iOS (http://aws.amazon.com/sdkforios/)
// and the AWS SDK for Android (http://aws.amazon.com/sdkforandroid/) to uniquely
// identify a user and supply the user with a consistent identity throughout
// the lifetime of an application.
//
// To learn more about Amazon Cognito, see Amazon Cognito Overview (http://docs.aws.amazon.com/mobile/sdkforandroid/developerguide/cognito-auth.html#d0e840)
// in the AWS SDK for Android Developer Guide and Amazon Cognito Overview
// (http://docs.aws.amazon.com/mobile/sdkforios/developerguide/cognito-auth.html#d0e664)
// in the AWS SDK for iOS Developer Guide.
//
// Calling AssumeRoleWithWebIdentity does not require the use of AWS security
// credentials. Therefore, you can distribute an application (for example, on
// mobile devices) that requests temporary security credentials without including
// long-term AWS credentials in the application, and without deploying server-based
// proxy services that use long-term AWS credentials. Instead, the identity
// of the caller is validated by using a token from the web identity provider.
// For a comparison of AssumeRoleWithWebIdentity with the other APIs that produce
// temporary credentials, see Requesting Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
// in the IAM User Guide.
//
// The temporary security credentials returned by this API consist of an access
// key ID, a secret access key, and a security token. Applications can use these
// temporary security credentials to sign calls to AWS service APIs.
//
// The credentials are valid for the duration that you specified when calling
// AssumeRoleWithWebIdentity, which can be from 900 seconds (15 minutes) to
// a maximum of 3600 seconds (1 hour). The default is 1 hour.
//
// The temporary security credentials created by AssumeRoleWithWebIdentity can
// be used to make API calls to any AWS service with the following exception:
// you cannot call the STS service's GetFederationToken or GetSessionToken APIs.
//
// Optionally, you can pass an IAM access policy to this operation. If you choose
// not to pass a policy, the temporary security credentials that are returned
// by the operation have the permissions that are defined in the access policy
// of the role that is being assumed. If you pass a policy to this operation,
// the temporary security credentials that are returned by the operation have
// the permissions that are allowed by both the access policy of the role that
// is being assumed, and the policy that you pass. This gives you a way to further
// restrict the permissions for the resulting temporary security credentials.
// You cannot use the passed policy to grant permissions that are in excess
// of those allowed by the access policy of the role that is being assumed.
// For more information, see Permissions for AssumeRole, AssumeRoleWithSAML,
// and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html)
// in the IAM User Guide.
//
// Before your application can call AssumeRoleWithWebIdentity, you must have
// an identity token from a supported identity provider and create a role that
// the application can assume. The role that your application assumes must trust
// the identity provider that is associated with the identity token. In other
// words, the identity provider must be specified in the role's trust policy.
//
// Calling AssumeRoleWithWebIdentity can result in an entry in your AWS CloudTrail
// logs. The entry includes the Subject (http://openid.net/specs/openid-connect-core-1_0.html#Claims)
// of the provided Web Identity Token. We recommend that you avoid using any
// personally identifiable information (PII) in this field. For example, you
// could instead use a GUID or a pairwise identifier, as suggested in the OIDC
// specification (http://openid.net/specs/openid-connect-core-1_0.html#SubjectIDTypes).
//
// For more information about how to use web identity federation and the AssumeRoleWithWebIdentity
// API, see the following resources:
//
//    * Using Web Identity Federation APIs for Mobile Apps (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual.html)
//    and Federation Through a Web-based Identity Provider (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity).
//
//    * Web Identity Federation Playground (https://web-identity-federation-playground.s3.amazonaws.com/index.html).
//    This interactive website lets you walk through the process of authenticating
//    via Login with Amazon, Facebook, or Google, getting temporary security
//    credentials, and then using those credentials to make a request to AWS.
//
//    * AWS SDK for iOS (http://aws.amazon.com/sdkforios/) and AWS SDK for Android
//    (http://aws.amazon.com/sdkforandroid/). These toolkits contain sample
//    apps that show how to invoke the identity providers, and then how to use
//    the information from these providers to get and use temporary security
//    credentials.
//
//    * Web Identity Federation with Mobile Applications (http://aws.amazon.com/articles/4617974389850313).
//    This article discusses web identity federation and shows an example of
//    how to use web identity federation to get access to content in Amazon
//    S3.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Security Token Service's
// API operation AssumeRoleWithWebIdentity for usage and error information.
//
// Returned Error Codes:
//   * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument"
//   The request was rejected because the policy document was malformed. The error
//   message describes the specific error.
//
//   * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge"
//   The request was rejected because the policy document was too large. The error
//   message describes how big the policy document is, in packed form, as a percentage
//   of what the API allows.
//
//   * ErrCodeIDPRejectedClaimException "IDPRejectedClaim"
//   The identity provider (IdP) reported that authentication failed. This might
//   be because the claim is invalid.
//
//   If this error is returned for the AssumeRoleWithWebIdentity operation, it
//   can also mean that the claim has expired or has been explicitly revoked.
//
//   * ErrCodeIDPCommunicationErrorException "IDPCommunicationError"
//   The request could not be fulfilled because the non-AWS identity provider
//   (IDP) that was asked to verify the incoming identity token could not be reached.
//   This is often a transient error caused by network conditions. Retry the request
//   a limited number of times so that you don't exceed the request rate. If the
//   error persists, the non-AWS identity provider might be down or not responding.
//
//   * ErrCodeInvalidIdentityTokenException "InvalidIdentityToken"
//   The web identity token that was passed could not be validated by AWS. Get
//   a new identity token from the identity provider and then retry the request.
//
//   * ErrCodeExpiredTokenException "ExpiredTokenException"
//   The web identity token that was passed is expired or is not valid. Get a
//   new identity token from the identity provider and then retry the request.
//
//   * ErrCodeRegionDisabledException "RegionDisabledException"
//   STS is not activated in the requested region for the account that is being
//   asked to generate credentials. The account administrator must use the IAM
//   console to activate STS in that region. For more information, see Activating
//   and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
//   in the IAM User Guide.
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity
func (c *STS) AssumeRoleWithWebIdentity(input *AssumeRoleWithWebIdentityInput) (*AssumeRoleWithWebIdentityOutput, error) {
	req, out := c.AssumeRoleWithWebIdentityRequest(input)
	return out, req.Send()
}
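
// exampleAssumeRoleWithWebIdentity is an editorial sketch, not generated code:
// it exchanges an OIDC token issued by a web identity provider for temporary
// credentials, per the doc comment above. The role ARN is a hypothetical
// placeholder; note that this call is unsigned, so no AWS credentials are
// needed on the client.
func exampleAssumeRoleWithWebIdentity(svc *STS, oidcToken string) (*AssumeRoleWithWebIdentityOutput, error) {
	return svc.AssumeRoleWithWebIdentity(&AssumeRoleWithWebIdentityInput{
		RoleArn:          aws.String("arn:aws:iam::123456789012:role/example"), // hypothetical
		RoleSessionName:  aws.String("web-identity-session"),
		WebIdentityToken: aws.String(oidcToken), // token issued by the web identity provider
	})
}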

// AssumeRoleWithWebIdentityWithContext is the same as AssumeRoleWithWebIdentity with the addition of
// the ability to pass a context and additional request options.
//
// See AssumeRoleWithWebIdentity for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *STS) AssumeRoleWithWebIdentityWithContext(ctx aws.Context, input *AssumeRoleWithWebIdentityInput, opts ...request.Option) (*AssumeRoleWithWebIdentityOutput, error) {
	req, out := c.AssumeRoleWithWebIdentityRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}

const opDecodeAuthorizationMessage = "DecodeAuthorizationMessage"

// DecodeAuthorizationMessageRequest generates a "aws/request.Request" representing the
// client's request for the DecodeAuthorizationMessage operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// See DecodeAuthorizationMessage for usage and error information.
//
// Creating a request object using this method should be used when you want to inject
// custom logic into the request's lifecycle using a custom handler, or if you want to
// access properties on the request object before or after sending the request. If
// you just want the service response, call the DecodeAuthorizationMessage method directly
// instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
//    // Example sending a request using the DecodeAuthorizationMessageRequest method.
//    req, resp := client.DecodeAuthorizationMessageRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage
func (c *STS) DecodeAuthorizationMessageRequest(input *DecodeAuthorizationMessageInput) (req *request.Request, output *DecodeAuthorizationMessageOutput) {
	op := &request.Operation{
		Name:       opDecodeAuthorizationMessage,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &DecodeAuthorizationMessageInput{}
	}

	output = &DecodeAuthorizationMessageOutput{}
	req = c.newRequest(op, input, output)
	return
}

// DecodeAuthorizationMessage API operation for AWS Security Token Service.
//
// Decodes additional information about the authorization status of a request
// from an encoded message returned in response to an AWS request.
//
// For example, if a user is not authorized to perform an action that he or
// she has requested, the request returns a Client.UnauthorizedOperation response
// (an HTTP 403 response). Some AWS actions additionally return an encoded message
// that can provide details about this authorization failure.
//
// Only certain AWS actions return an encoded authorization message. The documentation
// for an individual action indicates whether that action returns an encoded
// message in addition to returning an HTTP code.
//
// The message is encoded because the details of the authorization status can
// constitute privileged information that the user who requested the action
// should not see. To decode an authorization status message, a user must be
// granted permissions via an IAM policy to request the DecodeAuthorizationMessage
// (sts:DecodeAuthorizationMessage) action.
//
// The decoded message includes the following type of information:
//
//    * Whether the request was denied due to an explicit deny or due to the
//    absence of an explicit allow. For more information, see Determining Whether
//    a Request is Allowed or Denied (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html#policy-eval-denyallow)
//    in the IAM User Guide.
//
//    * The principal who made the request.
//
//    * The requested action.
//
//    * The requested resource.
//
//    * The values of condition keys in the context of the user's request.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Security Token Service's
// API operation DecodeAuthorizationMessage for usage and error information.
//
// Returned Error Codes:
//   * ErrCodeInvalidAuthorizationMessageException "InvalidAuthorizationMessageException"
//   The error returned if the message passed to DecodeAuthorizationMessage was
//   invalid. This can happen if the token contains invalid characters, such as
//   linebreaks.
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage
func (c *STS) DecodeAuthorizationMessage(input *DecodeAuthorizationMessageInput) (*DecodeAuthorizationMessageOutput, error) {
	req, out := c.DecodeAuthorizationMessageRequest(input)
	return out, req.Send()
}
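
// exampleDecodeAuthorizationMessage is an editorial sketch, not generated
// code: it decodes the encoded message that accompanies a
// Client.UnauthorizedOperation error, per the doc comment above. The caller
// needs the sts:DecodeAuthorizationMessage permission.
func exampleDecodeAuthorizationMessage(svc *STS, encoded string) (string, error) {
	out, err := svc.DecodeAuthorizationMessage(&DecodeAuthorizationMessageInput{
		EncodedMessage: aws.String(encoded),
	})
	if err != nil {
		return "", err
	}
	return aws.StringValue(out.DecodedMessage), nil // JSON describing the denial
}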

// DecodeAuthorizationMessageWithContext is the same as DecodeAuthorizationMessage with the addition of
// the ability to pass a context and additional request options.
//
// See DecodeAuthorizationMessage for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *STS) DecodeAuthorizationMessageWithContext(ctx aws.Context, input *DecodeAuthorizationMessageInput, opts ...request.Option) (*DecodeAuthorizationMessageOutput, error) {
	req, out := c.DecodeAuthorizationMessageRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}

const opGetCallerIdentity = "GetCallerIdentity"

// GetCallerIdentityRequest generates a "aws/request.Request" representing the
// client's request for the GetCallerIdentity operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// See GetCallerIdentity for usage and error information.
//
// Creating a request object using this method should be used when you want to inject
// custom logic into the request's lifecycle using a custom handler, or if you want to
// access properties on the request object before or after sending the request. If
// you just want the service response, call the GetCallerIdentity method directly
// instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
//    // Example sending a request using the GetCallerIdentityRequest method.
//    req, resp := client.GetCallerIdentityRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentity
func (c *STS) GetCallerIdentityRequest(input *GetCallerIdentityInput) (req *request.Request, output *GetCallerIdentityOutput) {
	op := &request.Operation{
		Name:       opGetCallerIdentity,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &GetCallerIdentityInput{}
	}

	output = &GetCallerIdentityOutput{}
	req = c.newRequest(op, input, output)
	return
}

// GetCallerIdentity API operation for AWS Security Token Service.
//
// Returns details about the IAM identity whose credentials are used to call
// the API.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Security Token Service's
// API operation GetCallerIdentity for usage and error information.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentity
func (c *STS) GetCallerIdentity(input *GetCallerIdentityInput) (*GetCallerIdentityOutput, error) {
	req, out := c.GetCallerIdentityRequest(input)
	return out, req.Send()
}
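
// exampleWhoAmI is an editorial sketch, not generated code: GetCallerIdentity
// takes an empty input and reports the account, ARN, and user ID of the
// credentials used to sign the request.
func exampleWhoAmI(svc *STS) (string, error) {
	out, err := svc.GetCallerIdentity(&GetCallerIdentityInput{})
	if err != nil {
		return "", err
	}
	return aws.StringValue(out.Arn), nil // e.g. the IAM user or assumed-role ARN
}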

// GetCallerIdentityWithContext is the same as GetCallerIdentity with the addition of
// the ability to pass a context and additional request options.
//
// See GetCallerIdentity for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *STS) GetCallerIdentityWithContext(ctx aws.Context, input *GetCallerIdentityInput, opts ...request.Option) (*GetCallerIdentityOutput, error) {
	req, out := c.GetCallerIdentityRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}

const opGetFederationToken = "GetFederationToken"

// GetFederationTokenRequest generates a "aws/request.Request" representing the
// client's request for the GetFederationToken operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// See GetFederationToken for usage and error information.
//
// Creating a request object using this method should be used when you want to inject
// custom logic into the request's lifecycle using a custom handler, or if you want to
// access properties on the request object before or after sending the request. If
// you just want the service response, call the GetFederationToken method directly
// instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
//    // Example sending a request using the GetFederationTokenRequest method.
//    req, resp := client.GetFederationTokenRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken
func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *request.Request, output *GetFederationTokenOutput) {
	op := &request.Operation{
		Name:       opGetFederationToken,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &GetFederationTokenInput{}
	}

	output = &GetFederationTokenOutput{}
	req = c.newRequest(op, input, output)
	return
}

// GetFederationToken API operation for AWS Security Token Service.
//
// Returns a set of temporary security credentials (consisting of an access
// key ID, a secret access key, and a security token) for a federated user.
// A typical use is in a proxy application that gets temporary security credentials
// on behalf of distributed applications inside a corporate network. Because
// you must call the GetFederationToken action using the long-term security
// credentials of an IAM user, this call is appropriate in contexts where those
// credentials can be safely stored, usually in a server-based application.
// For a comparison of GetFederationToken with the other APIs that produce temporary
// credentials, see Requesting Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
// in the IAM User Guide.
//
// If you are creating a mobile-based or browser-based app that can authenticate
// users using a web identity provider like Login with Amazon, Facebook, Google,
// or an OpenID Connect-compatible identity provider, we recommend that you
// use Amazon Cognito (http://aws.amazon.com/cognito/) or AssumeRoleWithWebIdentity.
// For more information, see Federation Through a Web-based Identity Provider
// (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity).
//
// The GetFederationToken action must be called by using the long-term AWS security
// credentials of an IAM user. You can also call GetFederationToken using the
// security credentials of an AWS root account, but we do not recommend it.
// Instead, we recommend that you create an IAM user for the purpose of the
// proxy application and then attach a policy to the IAM user that limits federated
// users to only the actions and resources that they need access to. For more
// information, see IAM Best Practices (http://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html)
// in the IAM User Guide.
//
// The temporary security credentials that are obtained by using the long-term
// credentials of an IAM user are valid for the specified duration, from 900
// seconds (15 minutes) up to a maximum of 129600 seconds (36 hours). The default
// is 43200 seconds (12 hours). Temporary credentials that are obtained by using
// AWS root account credentials have a maximum duration of 3600 seconds (1 hour).
//
// The temporary security credentials created by GetFederationToken can be used
// to make API calls to any AWS service with the following exceptions:
//
//    * You cannot use these credentials to call any IAM APIs.
//
//    * You cannot call any STS APIs except GetCallerIdentity.
//
// Permissions
//
// The permissions for the temporary security credentials returned by GetFederationToken
// are determined by a combination of the following:
//
//    * The policy or policies that are attached to the IAM user whose credentials
//    are used to call GetFederationToken.
//
//    * The policy that is passed as a parameter in the call.
//
// The passed policy is attached to the temporary security credentials that
// result from the GetFederationToken API call--that is, to the federated user.
// When the federated user makes an AWS request, AWS evaluates the policy attached
// to the federated user in combination with the policy or policies attached
// to the IAM user whose credentials were used to call GetFederationToken. AWS
// allows the federated user's request only when both the federated user and
// the IAM user are explicitly allowed to perform the requested action. The
// passed policy cannot grant more permissions than those that are defined in
// the IAM user policy.
//
// A typical use case is that the permissions of the IAM user whose credentials
// are used to call GetFederationToken are designed to allow access to all the
// actions and resources that any federated user will need. Then, for individual
// users, you pass a policy to the operation that scopes down the permissions
// to a level that's appropriate to that individual user, using a policy that
// allows only a subset of permissions that are granted to the IAM user.
//
// If you do not pass a policy, the resulting temporary security credentials
// have no effective permissions. The only exception is when the temporary security
// credentials are used to access a resource that has a resource-based policy
// that specifically allows the federated user to access the resource.
//
// For more information about how permissions work, see Permissions for GetFederationToken
// (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_getfederationtoken.html).
// For information about using GetFederationToken to create temporary security
// credentials, see GetFederationToken—Federation Through a Custom Identity
// Broker (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getfederationtoken).
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Security Token Service's
// API operation GetFederationToken for usage and error information.
//
// Returned Error Codes:
//   * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument"
//   The request was rejected because the policy document was malformed. The error
//   message describes the specific error.
//
//   * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge"
//   The request was rejected because the policy document was too large. The error
//   message describes how big the policy document is, in packed form, as a percentage
//   of what the API allows.
//
//   * ErrCodeRegionDisabledException "RegionDisabledException"
//   STS is not activated in the requested region for the account that is being
//   asked to generate credentials. The account administrator must use the IAM
//   console to activate STS in that region. For more information, see Activating
//   and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
//   in the IAM User Guide.
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken
func (c *STS) GetFederationToken(input *GetFederationTokenInput) (*GetFederationTokenOutput, error) {
	req, out := c.GetFederationTokenRequest(input)
	return out, req.Send()
}
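
// exampleGetFederationToken is an editorial sketch, not generated code: a proxy
// application issuing scoped-down federated credentials, as described above.
// The federated user name and the read-only Amazon S3 policy JSON below are
// hypothetical examples of a passed policy that intersects with the calling
// IAM user's permissions.
func exampleGetFederationToken(svc *STS) (*GetFederationTokenOutput, error) {
	policy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Action":["s3:Get*","s3:List*"],"Resource":"*"}]}`
	return svc.GetFederationToken(&GetFederationTokenInput{
		Name:            aws.String("example-federated-user"), // hypothetical federated user name
		Policy:          aws.String(policy),
		DurationSeconds: aws.Int64(3600), // 1 hour
	})
}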

// GetFederationTokenWithContext is the same as GetFederationToken with the addition of
// the ability to pass a context and additional request options.
//
// See GetFederationToken for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *STS) GetFederationTokenWithContext(ctx aws.Context, input *GetFederationTokenInput, opts ...request.Option) (*GetFederationTokenOutput, error) {
	req, out := c.GetFederationTokenRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}

const opGetSessionToken = "GetSessionToken"

// GetSessionTokenRequest generates a "aws/request.Request" representing the
// client's request for the GetSessionToken operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// See GetSessionToken for usage and error information.
//
// Creating a request object using this method should be used when you want to inject
// custom logic into the request's lifecycle using a custom handler, or if you want to
// access properties on the request object before or after sending the request. If
// you just want the service response, call the GetSessionToken method directly
// instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
//    // Example sending a request using the GetSessionTokenRequest method.
//    req, resp := client.GetSessionTokenRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken
func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request.Request, output *GetSessionTokenOutput) {
	op := &request.Operation{
		Name:       opGetSessionToken,
		HTTPMethod: "POST",
		HTTPPath:   "/",
	}

	if input == nil {
		input = &GetSessionTokenInput{}
	}

	output = &GetSessionTokenOutput{}
	req = c.newRequest(op, input, output)
	return
}

// GetSessionToken API operation for AWS Security Token Service.
//
// Returns a set of temporary credentials for an AWS account or IAM user. The
// credentials consist of an access key ID, a secret access key, and a security
// token. Typically, you use GetSessionToken if you want to use MFA to protect
// programmatic calls to specific AWS APIs like Amazon EC2 StopInstances. MFA-enabled
// IAM users would need to call GetSessionToken and submit an MFA code that
// is associated with their MFA device. Using the temporary security credentials
// that are returned from the call, IAM users can then make programmatic calls
// to APIs that require MFA authentication. If you do not supply a correct MFA
// code, then the API returns an access denied error. For a comparison of GetSessionToken
// with the other APIs that produce temporary credentials, see Requesting Temporary
// Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
// in the IAM User Guide.
//
// The GetSessionToken action must be called by using the long-term AWS security
// credentials of the AWS account or an IAM user. Credentials that are created
// by IAM users are valid for the duration that you specify, from 900 seconds
// (15 minutes) up to a maximum of 129600 seconds (36 hours), with a default
// of 43200 seconds (12 hours); credentials that are created by using account
// credentials can range from 900 seconds (15 minutes) up to a maximum of 3600
// seconds (1 hour), with a default of 1 hour.
//
// The temporary security credentials created by GetSessionToken can be used
// to make API calls to any AWS service with the following exceptions:
//
//    * You cannot call any IAM APIs unless MFA authentication information is
//    included in the request.
//
//    * You cannot call any STS API except AssumeRole or GetCallerIdentity.
//
// We recommend that you do not call GetSessionToken with root account credentials.
// Instead, follow our best practices (http://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#create-iam-users)
// by creating one or more IAM users, giving them the necessary permissions,
// and using IAM users for everyday interaction with AWS.
//
// The permissions associated with the temporary security credentials returned
// by GetSessionToken are based on the permissions associated with the account or
// IAM user whose credentials are used to call the action. If GetSessionToken
// is called using root account credentials, the temporary credentials have
// root account permissions. Similarly, if GetSessionToken is called using the
// credentials of an IAM user, the temporary credentials have the same permissions
// as the IAM user.
//
// For more information about using GetSessionToken to create temporary credentials,
// go to Temporary Credentials for Users in Untrusted Environments (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getsessiontoken)
// in the IAM User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for AWS Security Token Service's
// API operation GetSessionToken for usage and error information.
//
// Returned Error Codes:
//   * ErrCodeRegionDisabledException "RegionDisabledException"
//   STS is not activated in the requested region for the account that is being
//   asked to generate credentials. The account administrator must use the IAM
//   console to activate STS in that region. For more information, see Activating
//   and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
//   in the IAM User Guide.
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken
func (c *STS) GetSessionToken(input *GetSessionTokenInput) (*GetSessionTokenOutput, error) {
	req, out := c.GetSessionTokenRequest(input)
	return out, req.Send()
}
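
// exampleGetSessionToken is an editorial sketch, not generated code: an
// MFA-enabled IAM user requesting a session token, per the doc comment above.
// The MFA serial is a hypothetical placeholder; omit SerialNumber and TokenCode
// when MFA is not required.
func exampleGetSessionToken(svc *STS, totp string) (*GetSessionTokenOutput, error) {
	return svc.GetSessionToken(&GetSessionTokenInput{
		DurationSeconds: aws.Int64(43200),                                    // 12 hours, the IAM-user default
		SerialNumber:    aws.String("arn:aws:iam::123456789012:mfa/example"), // hypothetical
		TokenCode:       aws.String(totp),                                    // current code from the MFA device
	})
}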

// GetSessionTokenWithContext is the same as GetSessionToken with the addition of
// the ability to pass a context and additional request options.
//
// See GetSessionToken for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *STS) GetSessionTokenWithContext(ctx aws.Context, input *GetSessionTokenInput, opts ...request.Option) (*GetSessionTokenOutput, error) {
	req, out := c.GetSessionTokenRequest(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}

// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleRequest
type AssumeRoleInput struct {
	_ struct{} `type:"structure"`

	// The duration, in seconds, of the role session. The value can range from 900
	// seconds (15 minutes) to 3600 seconds (1 hour). By default, the value is set
	// to 3600 seconds.
	//
	// This is separate from the duration of a console session that you might request
	// using the returned credentials. The request to the federation endpoint for
	// a console sign-in token takes a SessionDuration parameter that specifies
	// the maximum length of the console session, separately from the DurationSeconds
	// parameter on this API. For more information, see Creating a URL that Enables
	// Federated Users to Access the AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html)
	// in the IAM User Guide.
	DurationSeconds *int64 `min:"900" type:"integer"`

	// A unique identifier that is used by third parties when assuming roles in
	// their customers' accounts. For each role that the third party can assume,
	// they should instruct their customers to ensure the role's trust policy checks
	// for the external ID that the third party generated. Each time the third party
	// assumes the role, they should pass the customer's external ID. The external
	// ID is useful in order to help third parties bind a role to the customer who
	// created it. For more information about the external ID, see How to Use an
	// External ID When Granting Access to Your AWS Resources to a Third Party (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html)
	// in the IAM User Guide.
	//
	// The regex used to validate this parameter is a string of characters consisting
	// of upper- and lower-case alphanumeric characters with no spaces. You can
	// also include underscores or any of the following characters: =,.@:\/-
	ExternalId *string `min:"2" type:"string"`

	// An IAM policy in JSON format.
	//
	// This parameter is optional. If you pass a policy, the temporary security
	// credentials that are returned by the operation have the permissions that
	// are allowed by both (the intersection of) the access policy of the role that
	// is being assumed, and the policy that you pass. This gives you a way to further
	// restrict the permissions for the resulting temporary security credentials.
	// You cannot use the passed policy to grant permissions that are in excess
	// of those allowed by the access policy of the role that is being assumed.
	// For more information, see Permissions for AssumeRole, AssumeRoleWithSAML,
	// and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html)
	// in the IAM User Guide.
	//
	// The format for this parameter, as described by its regex pattern, is a string
	// of characters up to 2048 characters in length. The characters can be any
	// ASCII character from the space character to the end of the valid character
	// list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A),
	// and carriage return (\u000D) characters.
	//
	// The policy plain text must be 2048 bytes or shorter. However, an internal
	// conversion compresses it into a packed binary format with a separate limit.
	// The PackedPolicySize response element indicates by percentage how close to
	// the upper size limit the policy is, with 100% equaling the maximum allowed
	// size.
	Policy *string `min:"1" type:"string"`

	// The Amazon Resource Name (ARN) of the role to assume.
	//
	// RoleArn is a required field
	RoleArn *string `min:"20" type:"string" required:"true"`

	// An identifier for the assumed role session.
	//
	// Use the role session name to uniquely identify a session when the same role
	// is assumed by different principals or for different reasons. In cross-account
	// scenarios, the role session name is visible to, and can be logged by the
	// account that owns the role. The role session name is also used in the ARN
	// of the assumed role principal. This means that subsequent cross-account API
	// requests using the temporary security credentials will expose the role session
	// name to the external account in their CloudTrail logs.
	//
	// The regex used to validate this parameter is a string of characters consisting
	// of upper- and lower-case alphanumeric characters with no spaces. You can
	// also include underscores or any of the following characters: =,.@-
	//
	// RoleSessionName is a required field
	RoleSessionName *string `min:"2" type:"string" required:"true"`

	// The identification number of the MFA device that is associated with the user
	// who is making the AssumeRole call. Specify this value if the trust policy
	// of the role being assumed includes a condition that requires MFA authentication.
	// The value is either the serial number for a hardware device (such as GAHT12345678)
	// or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user).
	//
	// The regex used to validate this parameter is a string of characters consisting
	// of upper- and lower-case alphanumeric characters with no spaces. You can
	// also include underscores or any of the following characters: =,.@-
	SerialNumber *string `min:"9" type:"string"`

	// The value provided by the MFA device, if the trust policy of the role being
	// assumed requires MFA (that is, if the policy includes a condition that tests
	// for MFA). If the role being assumed requires MFA and if the TokenCode value
	// is missing or expired, the AssumeRole call returns an "access denied" error.
	//
	// The format for this parameter, as described by its regex pattern, is a sequence
	// of six numeric digits.
	TokenCode *string `min:"6" type:"string"`
}

// String returns the string representation
func (s AssumeRoleInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s AssumeRoleInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *AssumeRoleInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "AssumeRoleInput"}
	if s.DurationSeconds != nil && *s.DurationSeconds < 900 {
		invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900))
	}
	if s.ExternalId != nil && len(*s.ExternalId) < 2 {
		invalidParams.Add(request.NewErrParamMinLen("ExternalId", 2))
	}
	if s.Policy != nil && len(*s.Policy) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("Policy", 1))
	}
	if s.RoleArn == nil {
		invalidParams.Add(request.NewErrParamRequired("RoleArn"))
	}
	if s.RoleArn != nil && len(*s.RoleArn) < 20 {
		invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20))
	}
	if s.RoleSessionName == nil {
		invalidParams.Add(request.NewErrParamRequired("RoleSessionName"))
	}
	if s.RoleSessionName != nil && len(*s.RoleSessionName) < 2 {
		invalidParams.Add(request.NewErrParamMinLen("RoleSessionName", 2))
	}
	if s.SerialNumber != nil && len(*s.SerialNumber) < 9 {
		invalidParams.Add(request.NewErrParamMinLen("SerialNumber", 9))
	}
	if s.TokenCode != nil && len(*s.TokenCode) < 6 {
1199 }
1200
1201 if invalidParams.Len() > 0 {
1202 return invalidParams
1203 }
1204 return nil
1205}
1206
1207// SetDurationSeconds sets the DurationSeconds field's value.
1208func (s *AssumeRoleInput) SetDurationSeconds(v int64) *AssumeRoleInput {
1209 s.DurationSeconds = &v
1210 return s
1211}
1212
1213// SetExternalId sets the ExternalId field's value.
1214func (s *AssumeRoleInput) SetExternalId(v string) *AssumeRoleInput {
1215 s.ExternalId = &v
1216 return s
1217}
1218
1219// SetPolicy sets the Policy field's value.
1220func (s *AssumeRoleInput) SetPolicy(v string) *AssumeRoleInput {
1221 s.Policy = &v
1222 return s
1223}
1224
1225// SetRoleArn sets the RoleArn field's value.
1226func (s *AssumeRoleInput) SetRoleArn(v string) *AssumeRoleInput {
1227 s.RoleArn = &v
1228 return s
1229}
1230
1231// SetRoleSessionName sets the RoleSessionName field's value.
1232func (s *AssumeRoleInput) SetRoleSessionName(v string) *AssumeRoleInput {
1233 s.RoleSessionName = &v
1234 return s
1235}
1236
1237// SetSerialNumber sets the SerialNumber field's value.
1238func (s *AssumeRoleInput) SetSerialNumber(v string) *AssumeRoleInput {
1239 s.SerialNumber = &v
1240 return s
1241}
1242
1243// SetTokenCode sets the TokenCode field's value.
1244func (s *AssumeRoleInput) SetTokenCode(v string) *AssumeRoleInput {
1245 s.TokenCode = &v
1246 return s
1247}
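// Editor's note: a hedged sketch (not part of the generated API) showing how
// the fluent setters above compose, and how Validate can reject bad input
// before a network call. The role ARN and session name are placeholders.
//
//	input := (&AssumeRoleInput{}).
//		SetRoleArn("arn:aws:iam::123456789012:role/demo").
//		SetRoleSessionName("demo-session").
//		SetDurationSeconds(900)
//	if err := input.Validate(); err != nil {
//		// handle invalid parameters locally instead of calling the service
//	}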
1248
1249// Contains the response to a successful AssumeRole request, including temporary
1250// AWS credentials that can be used to make AWS requests.
1251// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleResponse
1252type AssumeRoleOutput struct {
1253 _ struct{} `type:"structure"`
1254
1255 // The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers
1256 // that you can use to refer to the resulting temporary security credentials.
1257 // For example, you can reference these credentials as a principal in a resource-based
1258 // policy by using the ARN or assumed role ID. The ARN and ID include the RoleSessionName
1259 // that you specified when you called AssumeRole.
1260 AssumedRoleUser *AssumedRoleUser `type:"structure"`
1261
1262 // The temporary security credentials, which include an access key ID, a secret
1263 // access key, and a security (or session) token.
1264 //
1265 // Note: The size of the security token that STS APIs return is not fixed. We
1266 // strongly recommend that you make no assumptions about the maximum size. As
1267 // of this writing, the typical size is less than 4096 bytes, but that can vary.
1268 // Also, future updates to AWS might require larger sizes.
1269 Credentials *Credentials `type:"structure"`
1270
1271 // A percentage value that indicates the size of the policy in packed form.
1272 // The service rejects any policy with a packed size greater than 100 percent,
1273 // which means the policy exceeded the allowed space.
1274 PackedPolicySize *int64 `type:"integer"`
1275}
1276
1277// String returns the string representation
1278func (s AssumeRoleOutput) String() string {
1279 return awsutil.Prettify(s)
1280}
1281
1282// GoString returns the string representation
1283func (s AssumeRoleOutput) GoString() string {
1284 return s.String()
1285}
1286
1287// SetAssumedRoleUser sets the AssumedRoleUser field's value.
1288func (s *AssumeRoleOutput) SetAssumedRoleUser(v *AssumedRoleUser) *AssumeRoleOutput {
1289 s.AssumedRoleUser = v
1290 return s
1291}
1292
1293// SetCredentials sets the Credentials field's value.
1294func (s *AssumeRoleOutput) SetCredentials(v *Credentials) *AssumeRoleOutput {
1295 s.Credentials = v
1296 return s
1297}
1298
1299// SetPackedPolicySize sets the PackedPolicySize field's value.
1300func (s *AssumeRoleOutput) SetPackedPolicySize(v int64) *AssumeRoleOutput {
1301 s.PackedPolicySize = &v
1302 return s
1303}
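// Editor's note: a hedged sketch of consuming an AssumeRoleOutput; out is
// assumed to be the response of a successful AssumeRole call, and the aws
// helpers come from github.com/aws/aws-sdk-go/aws.
//
//	if out.Credentials != nil {
//		fmt.Println(aws.StringValue(out.Credentials.AccessKeyId), out.Credentials.Expiration)
//	}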
1304
1305// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAMLRequest
1306type AssumeRoleWithSAMLInput struct {
1307 _ struct{} `type:"structure"`
1308
1309 // The duration, in seconds, of the role session. The value can range from 900
1310 // seconds (15 minutes) to 3600 seconds (1 hour). By default, the value is set
1311 // to 3600 seconds. An expiration can also be specified in the SAML authentication
1312 // response's SessionNotOnOrAfter value. The actual expiration time is whichever
1313 // value is shorter.
1314 //
1315 // This is separate from the duration of a console session that you might request
1316 // using the returned credentials. The request to the federation endpoint for
1317 // a console sign-in token takes a SessionDuration parameter that specifies
1318 // the maximum length of the console session, separately from the DurationSeconds
1319 // parameter on this API. For more information, see Enabling SAML 2.0 Federated
1320 // Users to Access the AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-saml.html)
1321 // in the IAM User Guide.
1322 DurationSeconds *int64 `min:"900" type:"integer"`
1323
1324 // An IAM policy in JSON format.
1325 //
1326 // The policy parameter is optional. If you pass a policy, the temporary security
1327 // credentials that are returned by the operation have the permissions that
1328 // are allowed by both the access policy of the role that is being assumed,
1329 // and the policy that you pass. This gives you a way to further restrict the
1330 // permissions for the resulting temporary security credentials. You cannot
1331 // use the passed policy to grant permissions that are in excess of those allowed
1332 // by the access policy of the role that is being assumed. For more information,
1333 // see Permissions for AssumeRole, AssumeRoleWithSAML, and AssumeRoleWithWebIdentity
1334 // (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html)
1335 // in the IAM User Guide.
1336 //
1337 // The format for this parameter, as described by its regex pattern, is a string
1338 // of characters up to 2048 characters in length. The characters can be any
1339 // ASCII character from the space character to the end of the valid character
1340 // list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A),
1341 // and carriage return (\u000D) characters.
1342 //
1343 // The policy plain text must be 2048 bytes or shorter. However, an internal
1344 // conversion compresses it into a packed binary format with a separate limit.
1345 // The PackedPolicySize response element indicates by percentage how close to
1346 // the upper size limit the policy is, with 100% equaling the maximum allowed
1347 // size.
1348 Policy *string `min:"1" type:"string"`
1349
1350 // The Amazon Resource Name (ARN) of the SAML provider in IAM that describes
1351 // the IdP.
1352 //
1353 // PrincipalArn is a required field
1354 PrincipalArn *string `min:"20" type:"string" required:"true"`
1355
1356 // The Amazon Resource Name (ARN) of the role that the caller is assuming.
1357 //
1358 // RoleArn is a required field
1359 RoleArn *string `min:"20" type:"string" required:"true"`
1360
1361 // The base-64 encoded SAML authentication response provided by the IdP.
1362 //
1363 // For more information, see Configuring a Relying Party and Adding Claims (http://docs.aws.amazon.com/IAM/latest/UserGuide/create-role-saml-IdP-tasks.html)
1364 // in the Using IAM guide.
1365 //
1366 // SAMLAssertion is a required field
1367 SAMLAssertion *string `min:"4" type:"string" required:"true"`
1368}
1369
1370// String returns the string representation
1371func (s AssumeRoleWithSAMLInput) String() string {
1372 return awsutil.Prettify(s)
1373}
1374
1375// GoString returns the string representation
1376func (s AssumeRoleWithSAMLInput) GoString() string {
1377 return s.String()
1378}
1379
1380// Validate inspects the fields of the type to determine if they are valid.
1381func (s *AssumeRoleWithSAMLInput) Validate() error {
1382 invalidParams := request.ErrInvalidParams{Context: "AssumeRoleWithSAMLInput"}
1383 if s.DurationSeconds != nil && *s.DurationSeconds < 900 {
1384 invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900))
1385 }
1386 if s.Policy != nil && len(*s.Policy) < 1 {
1387 invalidParams.Add(request.NewErrParamMinLen("Policy", 1))
1388 }
1389 if s.PrincipalArn == nil {
1390 invalidParams.Add(request.NewErrParamRequired("PrincipalArn"))
1391 }
1392 if s.PrincipalArn != nil && len(*s.PrincipalArn) < 20 {
1393 invalidParams.Add(request.NewErrParamMinLen("PrincipalArn", 20))
1394 }
1395 if s.RoleArn == nil {
1396 invalidParams.Add(request.NewErrParamRequired("RoleArn"))
1397 }
1398 if s.RoleArn != nil && len(*s.RoleArn) < 20 {
1399 invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20))
1400 }
1401 if s.SAMLAssertion == nil {
1402 invalidParams.Add(request.NewErrParamRequired("SAMLAssertion"))
1403 }
1404 if s.SAMLAssertion != nil && len(*s.SAMLAssertion) < 4 {
1405 invalidParams.Add(request.NewErrParamMinLen("SAMLAssertion", 4))
1406 }
1407
1408 if invalidParams.Len() > 0 {
1409 return invalidParams
1410 }
1411 return nil
1412}
1413
1414// SetDurationSeconds sets the DurationSeconds field's value.
1415func (s *AssumeRoleWithSAMLInput) SetDurationSeconds(v int64) *AssumeRoleWithSAMLInput {
1416 s.DurationSeconds = &v
1417 return s
1418}
1419
1420// SetPolicy sets the Policy field's value.
1421func (s *AssumeRoleWithSAMLInput) SetPolicy(v string) *AssumeRoleWithSAMLInput {
1422 s.Policy = &v
1423 return s
1424}
1425
1426// SetPrincipalArn sets the PrincipalArn field's value.
1427func (s *AssumeRoleWithSAMLInput) SetPrincipalArn(v string) *AssumeRoleWithSAMLInput {
1428 s.PrincipalArn = &v
1429 return s
1430}
1431
1432// SetRoleArn sets the RoleArn field's value.
1433func (s *AssumeRoleWithSAMLInput) SetRoleArn(v string) *AssumeRoleWithSAMLInput {
1434 s.RoleArn = &v
1435 return s
1436}
1437
1438// SetSAMLAssertion sets the SAMLAssertion field's value.
1439func (s *AssumeRoleWithSAMLInput) SetSAMLAssertion(v string) *AssumeRoleWithSAMLInput {
1440 s.SAMLAssertion = &v
1441 return s
1442}
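// Editor's note: a hedged sketch of a minimal AssumeRoleWithSAMLInput. The
// ARNs are placeholders, and samlResponse is assumed to hold the
// base64-encoded SAML assertion from the IdP.
//
//	input := (&AssumeRoleWithSAMLInput{}).
//		SetPrincipalArn("arn:aws:iam::123456789012:saml-provider/demo").
//		SetRoleArn("arn:aws:iam::123456789012:role/demo").
//		SetSAMLAssertion(samlResponse)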
1443
1444// Contains the response to a successful AssumeRoleWithSAML request, including
1445// temporary AWS credentials that can be used to make AWS requests.
1446// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAMLResponse
1447type AssumeRoleWithSAMLOutput struct {
1448 _ struct{} `type:"structure"`
1449
1450 // The identifiers for the temporary security credentials that the operation
1451 // returns.
1452 AssumedRoleUser *AssumedRoleUser `type:"structure"`
1453
1454 // The value of the Recipient attribute of the SubjectConfirmationData element
1455 // of the SAML assertion.
1456 Audience *string `type:"string"`
1457
1458 // The temporary security credentials, which include an access key ID, a secret
1459 // access key, and a security (or session) token.
1460 //
1461 // Note: The size of the security token that STS APIs return is not fixed. We
1462 // strongly recommend that you make no assumptions about the maximum size. As
1463 // of this writing, the typical size is less than 4096 bytes, but that can vary.
1464 // Also, future updates to AWS might require larger sizes.
1465 Credentials *Credentials `type:"structure"`
1466
1467 // The value of the Issuer element of the SAML assertion.
1468 Issuer *string `type:"string"`
1469
1470 // A hash value based on the concatenation of the Issuer response value, the
1471 // AWS account ID, and the friendly name (the last part of the ARN) of the SAML
1472 // provider in IAM. The combination of NameQualifier and Subject can be used
1473 // to uniquely identify a federated user.
1474 //
1475 // The following pseudocode shows how the hash value is calculated (a Go sketch follows this struct):
1476 //
1477 // BASE64 ( SHA1 ( "https://example.com/saml" + "123456789012" + "/MySAMLIdP"
1478 // ) )
1479 NameQualifier *string `type:"string"`
1480
1481 // A percentage value that indicates the size of the policy in packed form.
1482 // The service rejects any policy with a packed size greater than 100 percent,
1483 // which means the policy exceeded the allowed space.
1484 PackedPolicySize *int64 `type:"integer"`
1485
1486 // The value of the NameID element in the Subject element of the SAML assertion.
1487 Subject *string `type:"string"`
1488
1489 // The format of the name ID, as defined by the Format attribute in the NameID
1490 // element of the SAML assertion. Typical examples of the format are transient
1491 // or persistent.
1492 //
1493 // If the format includes the prefix urn:oasis:names:tc:SAML:2.0:nameid-format,
1494 // that prefix is removed. For example, urn:oasis:names:tc:SAML:2.0:nameid-format:transient
1495 // is returned as transient. If the format includes any other prefix, the format
1496 // is returned with no modifications.
1497 SubjectType *string `type:"string"`
1498}
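// Editor's note: a hedged Go sketch of the NameQualifier pseudocode in the
// struct above, using only the standard library (crypto/sha1 and
// encoding/base64) and the placeholder values from the comment.
//
//	sum := sha1.Sum([]byte("https://example.com/saml" + "123456789012" + "/MySAMLIdP"))
//	nameQualifier := base64.StdEncoding.EncodeToString(sum[:])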
1499
1500// String returns the string representation
1501func (s AssumeRoleWithSAMLOutput) String() string {
1502 return awsutil.Prettify(s)
1503}
1504
1505// GoString returns the string representation
1506func (s AssumeRoleWithSAMLOutput) GoString() string {
1507 return s.String()
1508}
1509
1510// SetAssumedRoleUser sets the AssumedRoleUser field's value.
1511func (s *AssumeRoleWithSAMLOutput) SetAssumedRoleUser(v *AssumedRoleUser) *AssumeRoleWithSAMLOutput {
1512 s.AssumedRoleUser = v
1513 return s
1514}
1515
1516// SetAudience sets the Audience field's value.
1517func (s *AssumeRoleWithSAMLOutput) SetAudience(v string) *AssumeRoleWithSAMLOutput {
1518 s.Audience = &v
1519 return s
1520}
1521
1522// SetCredentials sets the Credentials field's value.
1523func (s *AssumeRoleWithSAMLOutput) SetCredentials(v *Credentials) *AssumeRoleWithSAMLOutput {
1524 s.Credentials = v
1525 return s
1526}
1527
1528// SetIssuer sets the Issuer field's value.
1529func (s *AssumeRoleWithSAMLOutput) SetIssuer(v string) *AssumeRoleWithSAMLOutput {
1530 s.Issuer = &v
1531 return s
1532}
1533
1534// SetNameQualifier sets the NameQualifier field's value.
1535func (s *AssumeRoleWithSAMLOutput) SetNameQualifier(v string) *AssumeRoleWithSAMLOutput {
1536 s.NameQualifier = &v
1537 return s
1538}
1539
1540// SetPackedPolicySize sets the PackedPolicySize field's value.
1541func (s *AssumeRoleWithSAMLOutput) SetPackedPolicySize(v int64) *AssumeRoleWithSAMLOutput {
1542 s.PackedPolicySize = &v
1543 return s
1544}
1545
1546// SetSubject sets the Subject field's value.
1547func (s *AssumeRoleWithSAMLOutput) SetSubject(v string) *AssumeRoleWithSAMLOutput {
1548 s.Subject = &v
1549 return s
1550}
1551
1552// SetSubjectType sets the SubjectType field's value.
1553func (s *AssumeRoleWithSAMLOutput) SetSubjectType(v string) *AssumeRoleWithSAMLOutput {
1554 s.SubjectType = &v
1555 return s
1556}
1557
1558// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentityRequest
1559type AssumeRoleWithWebIdentityInput struct {
1560 _ struct{} `type:"structure"`
1561
1562 // The duration, in seconds, of the role session. The value can range from 900
1563 // seconds (15 minutes) to 3600 seconds (1 hour). By default, the value is set
1564 // to 3600 seconds.
1565 //
1566 // This is separate from the duration of a console session that you might request
1567 // using the returned credentials. The request to the federation endpoint for
1568 // a console sign-in token takes a SessionDuration parameter that specifies
1569 // the maximum length of the console session, separately from the DurationSeconds
1570 // parameter on this API. For more information, see Creating a URL that Enables
1571 // Federated Users to Access the AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html)
1572 // in the IAM User Guide.
1573 DurationSeconds *int64 `min:"900" type:"integer"`
1574
1575 // An IAM policy in JSON format.
1576 //
1577 // The policy parameter is optional. If you pass a policy, the temporary security
1578 // credentials that are returned by the operation have the permissions that
1579 // are allowed by both the access policy of the role that is being assumed,
1580 // and the policy that you pass. This gives you a way to further restrict the
1581 // permissions for the resulting temporary security credentials. You cannot
1582 // use the passed policy to grant permissions that are in excess of those allowed
1583 // by the access policy of the role that is being assumed. For more information,
1584 // see Permissions for AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html)
1585 // in the IAM User Guide.
1586 //
1587 // The format for this parameter, as described by its regex pattern, is a string
1588 // of characters up to 2048 characters in length. The characters can be any
1589 // ASCII character from the space character to the end of the valid character
1590 // list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A),
1591 // and carriage return (\u000D) characters.
1592 //
1593 // The policy plain text must be 2048 bytes or shorter. However, an internal
1594 // conversion compresses it into a packed binary format with a separate limit.
1595 // The PackedPolicySize response element indicates by percentage how close to
1596 // the upper size limit the policy is, with 100% equaling the maximum allowed
1597 // size.
1598 Policy *string `min:"1" type:"string"`
1599
1600 // The fully qualified host component of the domain name of the identity provider.
1601 //
1602 // Specify this value only for OAuth 2.0 access tokens. Currently www.amazon.com
1603 // and graph.facebook.com are the only supported identity providers for OAuth
1604 // 2.0 access tokens. Do not include URL schemes and port numbers.
1605 //
1606 // Do not specify this value for OpenID Connect ID tokens.
1607 ProviderId *string `min:"4" type:"string"`
1608
1609 // The Amazon Resource Name (ARN) of the role that the caller is assuming.
1610 //
1611 // RoleArn is a required field
1612 RoleArn *string `min:"20" type:"string" required:"true"`
1613
1614 // An identifier for the assumed role session. Typically, you pass the name
1615 // or identifier that is associated with the user who is using your application.
1616 // That way, the temporary security credentials that your application will use
1617 // are associated with that user. This session name is included as part of the
1618 // ARN and assumed role ID in the AssumedRoleUser response element.
1619 //
1620 // The regex used to validate this parameter is a string of characters consisting
1621 // of upper- and lower-case alphanumeric characters with no spaces. You can
1622 // also include underscores or any of the following characters: =,.@-
1623 //
1624 // RoleSessionName is a required field
1625 RoleSessionName *string `min:"2" type:"string" required:"true"`
1626
1627 // The OAuth 2.0 access token or OpenID Connect ID token that is provided by
1628 // the identity provider. Your application must get this token by authenticating
1629 // the user who is using your application with a web identity provider before
1630 // the application makes an AssumeRoleWithWebIdentity call.
1631 //
1632 // WebIdentityToken is a required field
1633 WebIdentityToken *string `min:"4" type:"string" required:"true"`
1634}
1635
1636// String returns the string representation
1637func (s AssumeRoleWithWebIdentityInput) String() string {
1638 return awsutil.Prettify(s)
1639}
1640
1641// GoString returns the string representation
1642func (s AssumeRoleWithWebIdentityInput) GoString() string {
1643 return s.String()
1644}
1645
1646// Validate inspects the fields of the type to determine if they are valid.
1647func (s *AssumeRoleWithWebIdentityInput) Validate() error {
1648 invalidParams := request.ErrInvalidParams{Context: "AssumeRoleWithWebIdentityInput"}
1649 if s.DurationSeconds != nil && *s.DurationSeconds < 900 {
1650 invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900))
1651 }
1652 if s.Policy != nil && len(*s.Policy) < 1 {
1653 invalidParams.Add(request.NewErrParamMinLen("Policy", 1))
1654 }
1655 if s.ProviderId != nil && len(*s.ProviderId) < 4 {
1656 invalidParams.Add(request.NewErrParamMinLen("ProviderId", 4))
1657 }
1658 if s.RoleArn == nil {
1659 invalidParams.Add(request.NewErrParamRequired("RoleArn"))
1660 }
1661 if s.RoleArn != nil && len(*s.RoleArn) < 20 {
1662 invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20))
1663 }
1664 if s.RoleSessionName == nil {
1665 invalidParams.Add(request.NewErrParamRequired("RoleSessionName"))
1666 }
1667 if s.RoleSessionName != nil && len(*s.RoleSessionName) < 2 {
1668 invalidParams.Add(request.NewErrParamMinLen("RoleSessionName", 2))
1669 }
1670 if s.WebIdentityToken == nil {
1671 invalidParams.Add(request.NewErrParamRequired("WebIdentityToken"))
1672 }
1673 if s.WebIdentityToken != nil && len(*s.WebIdentityToken) < 4 {
1674 invalidParams.Add(request.NewErrParamMinLen("WebIdentityToken", 4))
1675 }
1676
1677 if invalidParams.Len() > 0 {
1678 return invalidParams
1679 }
1680 return nil
1681}
1682
1683// SetDurationSeconds sets the DurationSeconds field's value.
1684func (s *AssumeRoleWithWebIdentityInput) SetDurationSeconds(v int64) *AssumeRoleWithWebIdentityInput {
1685 s.DurationSeconds = &v
1686 return s
1687}
1688
1689// SetPolicy sets the Policy field's value.
1690func (s *AssumeRoleWithWebIdentityInput) SetPolicy(v string) *AssumeRoleWithWebIdentityInput {
1691 s.Policy = &v
1692 return s
1693}
1694
1695// SetProviderId sets the ProviderId field's value.
1696func (s *AssumeRoleWithWebIdentityInput) SetProviderId(v string) *AssumeRoleWithWebIdentityInput {
1697 s.ProviderId = &v
1698 return s
1699}
1700
1701// SetRoleArn sets the RoleArn field's value.
1702func (s *AssumeRoleWithWebIdentityInput) SetRoleArn(v string) *AssumeRoleWithWebIdentityInput {
1703 s.RoleArn = &v
1704 return s
1705}
1706
1707// SetRoleSessionName sets the RoleSessionName field's value.
1708func (s *AssumeRoleWithWebIdentityInput) SetRoleSessionName(v string) *AssumeRoleWithWebIdentityInput {
1709 s.RoleSessionName = &v
1710 return s
1711}
1712
1713// SetWebIdentityToken sets the WebIdentityToken field's value.
1714func (s *AssumeRoleWithWebIdentityInput) SetWebIdentityToken(v string) *AssumeRoleWithWebIdentityInput {
1715 s.WebIdentityToken = &v
1716 return s
1717}
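// Editor's note: a hedged sketch of a minimal AssumeRoleWithWebIdentityInput
// for an OpenID Connect ID token (ProviderId is omitted, as documented above).
// The ARN and session name are placeholders; idToken is assumed to come from
// the identity provider.
//
//	input := (&AssumeRoleWithWebIdentityInput{}).
//		SetRoleArn("arn:aws:iam::123456789012:role/demo").
//		SetRoleSessionName("app-user-123").
//		SetWebIdentityToken(idToken)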
1718
1719// Contains the response to a successful AssumeRoleWithWebIdentity request,
1720// including temporary AWS credentials that can be used to make AWS requests.
1721// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentityResponse
1722type AssumeRoleWithWebIdentityOutput struct {
1723 _ struct{} `type:"structure"`
1724
1725 // The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers
1726 // that you can use to refer to the resulting temporary security credentials.
1727 // For example, you can reference these credentials as a principal in a resource-based
1728 // policy by using the ARN or assumed role ID. The ARN and ID include the RoleSessionName
1729 // that you specified when you called AssumeRole.
1730 AssumedRoleUser *AssumedRoleUser `type:"structure"`
1731
1732 // The intended audience (also known as client ID) of the web identity token.
1733 // This is traditionally the client identifier issued to the application that
1734 // requested the web identity token.
1735 Audience *string `type:"string"`
1736
1737 // The temporary security credentials, which include an access key ID, a secret
1738 // access key, and a security token.
1739 //
1740 // Note: The size of the security token that STS APIs return is not fixed. We
1741 // strongly recommend that you make no assumptions about the maximum size. As
1742 // of this writing, the typical size is less than 4096 bytes, but that can vary.
1743 // Also, future updates to AWS might require larger sizes.
1744 Credentials *Credentials `type:"structure"`
1745
1746 // A percentage value that indicates the size of the policy in packed form.
1747 // The service rejects any policy with a packed size greater than 100 percent,
1748 // which means the policy exceeded the allowed space.
1749 PackedPolicySize *int64 `type:"integer"`
1750
1751 // The issuing authority of the web identity token presented. For OpenID Connect
1752 // ID Tokens this contains the value of the iss field. For OAuth 2.0 access
1753 // tokens, this contains the value of the ProviderId parameter that was passed
1754 // in the AssumeRoleWithWebIdentity request.
1755 Provider *string `type:"string"`
1756
1757 // The unique user identifier that is returned by the identity provider. This
1758 // identifier is associated with the WebIdentityToken that was submitted with
1759 // the AssumeRoleWithWebIdentity call. The identifier is typically unique to
1760 // the user and the application that acquired the WebIdentityToken (pairwise
1761 // identifier). For OpenID Connect ID tokens, this field contains the value
1762 // returned by the identity provider as the token's sub (Subject) claim.
1763 SubjectFromWebIdentityToken *string `min:"6" type:"string"`
1764}
1765
1766// String returns the string representation
1767func (s AssumeRoleWithWebIdentityOutput) String() string {
1768 return awsutil.Prettify(s)
1769}
1770
1771// GoString returns the string representation
1772func (s AssumeRoleWithWebIdentityOutput) GoString() string {
1773 return s.String()
1774}
1775
1776// SetAssumedRoleUser sets the AssumedRoleUser field's value.
1777func (s *AssumeRoleWithWebIdentityOutput) SetAssumedRoleUser(v *AssumedRoleUser) *AssumeRoleWithWebIdentityOutput {
1778 s.AssumedRoleUser = v
1779 return s
1780}
1781
1782// SetAudience sets the Audience field's value.
1783func (s *AssumeRoleWithWebIdentityOutput) SetAudience(v string) *AssumeRoleWithWebIdentityOutput {
1784 s.Audience = &v
1785 return s
1786}
1787
1788// SetCredentials sets the Credentials field's value.
1789func (s *AssumeRoleWithWebIdentityOutput) SetCredentials(v *Credentials) *AssumeRoleWithWebIdentityOutput {
1790 s.Credentials = v
1791 return s
1792}
1793
1794// SetPackedPolicySize sets the PackedPolicySize field's value.
1795func (s *AssumeRoleWithWebIdentityOutput) SetPackedPolicySize(v int64) *AssumeRoleWithWebIdentityOutput {
1796 s.PackedPolicySize = &v
1797 return s
1798}
1799
1800// SetProvider sets the Provider field's value.
1801func (s *AssumeRoleWithWebIdentityOutput) SetProvider(v string) *AssumeRoleWithWebIdentityOutput {
1802 s.Provider = &v
1803 return s
1804}
1805
1806// SetSubjectFromWebIdentityToken sets the SubjectFromWebIdentityToken field's value.
1807func (s *AssumeRoleWithWebIdentityOutput) SetSubjectFromWebIdentityToken(v string) *AssumeRoleWithWebIdentityOutput {
1808 s.SubjectFromWebIdentityToken = &v
1809 return s
1810}
1811
1812// The identifiers for the temporary security credentials that the operation
1813// returns.
1814// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumedRoleUser
1815type AssumedRoleUser struct {
1816 _ struct{} `type:"structure"`
1817
1818 // The ARN of the temporary security credentials that are returned from the
1819 // AssumeRole action. For more information about ARNs and how to use them in
1820 // policies, see IAM Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html)
1821 // in Using IAM.
1822 //
1823 // Arn is a required field
1824 Arn *string `min:"20" type:"string" required:"true"`
1825
1826 // A unique identifier that contains the role ID and the role session name of
1827 // the role that is being assumed. The role ID is generated by AWS when the
1828 // role is created.
1829 //
1830 // AssumedRoleId is a required field
1831 AssumedRoleId *string `min:"2" type:"string" required:"true"`
1832}
1833
1834// String returns the string representation
1835func (s AssumedRoleUser) String() string {
1836 return awsutil.Prettify(s)
1837}
1838
1839// GoString returns the string representation
1840func (s AssumedRoleUser) GoString() string {
1841 return s.String()
1842}
1843
1844// SetArn sets the Arn field's value.
1845func (s *AssumedRoleUser) SetArn(v string) *AssumedRoleUser {
1846 s.Arn = &v
1847 return s
1848}
1849
1850// SetAssumedRoleId sets the AssumedRoleId field's value.
1851func (s *AssumedRoleUser) SetAssumedRoleId(v string) *AssumedRoleUser {
1852 s.AssumedRoleId = &v
1853 return s
1854}
1855
1856// AWS credentials for API authentication.
1857// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/Credentials
1858type Credentials struct {
1859 _ struct{} `type:"structure"`
1860
1861 // The access key ID that identifies the temporary security credentials.
1862 //
1863 // AccessKeyId is a required field
1864 AccessKeyId *string `min:"16" type:"string" required:"true"`
1865
1866 // The date on which the current credentials expire.
1867 //
1868 // Expiration is a required field
1869 Expiration *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"`
1870
1871 // The secret access key that can be used to sign requests.
1872 //
1873 // SecretAccessKey is a required field
1874 SecretAccessKey *string `type:"string" required:"true"`
1875
1876 // The token that users must pass to the service API to use the temporary credentials.
1877 //
1878 // SessionToken is a required field
1879 SessionToken *string `type:"string" required:"true"`
1880}
1881
1882// String returns the string representation
1883func (s Credentials) String() string {
1884 return awsutil.Prettify(s)
1885}
1886
1887// GoString returns the string representation
1888func (s Credentials) GoString() string {
1889 return s.String()
1890}
1891
1892// SetAccessKeyId sets the AccessKeyId field's value.
1893func (s *Credentials) SetAccessKeyId(v string) *Credentials {
1894 s.AccessKeyId = &v
1895 return s
1896}
1897
1898// SetExpiration sets the Expiration field's value.
1899func (s *Credentials) SetExpiration(v time.Time) *Credentials {
1900 s.Expiration = &v
1901 return s
1902}
1903
1904// SetSecretAccessKey sets the SecretAccessKey field's value.
1905func (s *Credentials) SetSecretAccessKey(v string) *Credentials {
1906 s.SecretAccessKey = &v
1907 return s
1908}
1909
1910// SetSessionToken sets the SessionToken field's value.
1911func (s *Credentials) SetSessionToken(v string) *Credentials {
1912 s.SessionToken = &v
1913 return s
1914}
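// Editor's note: a hedged sketch of reusing a returned Credentials value as a
// static provider for another client; creds is assumed to come from one of
// the responses above, and NewStaticCredentials is from
// github.com/aws/aws-sdk-go/aws/credentials.
//
//	provider := credentials.NewStaticCredentials(
//		aws.StringValue(creds.AccessKeyId),
//		aws.StringValue(creds.SecretAccessKey),
//		aws.StringValue(creds.SessionToken))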
1915
1916// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessageRequest
1917type DecodeAuthorizationMessageInput struct {
1918 _ struct{} `type:"structure"`
1919
1920 // The encoded message that was returned with the response.
1921 //
1922 // EncodedMessage is a required field
1923 EncodedMessage *string `min:"1" type:"string" required:"true"`
1924}
1925
1926// String returns the string representation
1927func (s DecodeAuthorizationMessageInput) String() string {
1928 return awsutil.Prettify(s)
1929}
1930
1931// GoString returns the string representation
1932func (s DecodeAuthorizationMessageInput) GoString() string {
1933 return s.String()
1934}
1935
1936// Validate inspects the fields of the type to determine if they are valid.
1937func (s *DecodeAuthorizationMessageInput) Validate() error {
1938 invalidParams := request.ErrInvalidParams{Context: "DecodeAuthorizationMessageInput"}
1939 if s.EncodedMessage == nil {
1940 invalidParams.Add(request.NewErrParamRequired("EncodedMessage"))
1941 }
1942 if s.EncodedMessage != nil && len(*s.EncodedMessage) < 1 {
1943 invalidParams.Add(request.NewErrParamMinLen("EncodedMessage", 1))
1944 }
1945
1946 if invalidParams.Len() > 0 {
1947 return invalidParams
1948 }
1949 return nil
1950}
1951
1952// SetEncodedMessage sets the EncodedMessage field's value.
1953func (s *DecodeAuthorizationMessageInput) SetEncodedMessage(v string) *DecodeAuthorizationMessageInput {
1954 s.EncodedMessage = &v
1955 return s
1956}
1957
1958// A document that contains additional information about the authorization status
1959// of a request from an encoded message that is returned in response to an AWS
1960// request.
1961// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessageResponse
1962type DecodeAuthorizationMessageOutput struct {
1963 _ struct{} `type:"structure"`
1964
1965 // An XML document that contains the decoded message.
1966 DecodedMessage *string `type:"string"`
1967}
1968
1969// String returns the string representation
1970func (s DecodeAuthorizationMessageOutput) String() string {
1971 return awsutil.Prettify(s)
1972}
1973
1974// GoString returns the string representation
1975func (s DecodeAuthorizationMessageOutput) GoString() string {
1976 return s.String()
1977}
1978
1979// SetDecodedMessage sets the DecodedMessage field's value.
1980func (s *DecodeAuthorizationMessageOutput) SetDecodedMessage(v string) *DecodeAuthorizationMessageOutput {
1981 s.DecodedMessage = &v
1982 return s
1983}
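// Editor's note: a hedged sketch of decoding an authorization failure
// message; svc is assumed to be an *STS client and encoded to be the
// encoded message returned with a failed request.
//
//	out, err := svc.DecodeAuthorizationMessage(&DecodeAuthorizationMessageInput{
//		EncodedMessage: aws.String(encoded),
//	})
//	if err == nil {
//		fmt.Println(aws.StringValue(out.DecodedMessage)) // an XML document
//	}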
1984
1985// Identifiers for the federated user that is associated with the credentials.
1986// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/FederatedUser
1987type FederatedUser struct {
1988 _ struct{} `type:"structure"`
1989
1990 // The ARN that specifies the federated user that is associated with the credentials.
1991 // For more information about ARNs and how to use them in policies, see IAM
1992 // Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html)
1993 // in Using IAM.
1994 //
1995 // Arn is a required field
1996 Arn *string `min:"20" type:"string" required:"true"`
1997
1998 // The string that identifies the federated user associated with the credentials,
1999 // similar to the unique ID of an IAM user.
2000 //
2001 // FederatedUserId is a required field
2002 FederatedUserId *string `min:"2" type:"string" required:"true"`
2003}
2004
2005// String returns the string representation
2006func (s FederatedUser) String() string {
2007 return awsutil.Prettify(s)
2008}
2009
2010// GoString returns the string representation
2011func (s FederatedUser) GoString() string {
2012 return s.String()
2013}
2014
2015// SetArn sets the Arn field's value.
2016func (s *FederatedUser) SetArn(v string) *FederatedUser {
2017 s.Arn = &v
2018 return s
2019}
2020
2021// SetFederatedUserId sets the FederatedUserId field's value.
2022func (s *FederatedUser) SetFederatedUserId(v string) *FederatedUser {
2023 s.FederatedUserId = &v
2024 return s
2025}
2026
2027// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentityRequest
2028type GetCallerIdentityInput struct {
2029 _ struct{} `type:"structure"`
2030}
2031
2032// String returns the string representation
2033func (s GetCallerIdentityInput) String() string {
2034 return awsutil.Prettify(s)
2035}
2036
2037// GoString returns the string representation
2038func (s GetCallerIdentityInput) GoString() string {
2039 return s.String()
2040}
2041
2042// Contains the response to a successful GetCallerIdentity request, including
2043// information about the entity making the request.
2044// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentityResponse
2045type GetCallerIdentityOutput struct {
2046 _ struct{} `type:"structure"`
2047
2048 // The AWS account ID number of the account that owns or contains the calling
2049 // entity.
2050 Account *string `type:"string"`
2051
2052 // The AWS ARN associated with the calling entity.
2053 Arn *string `min:"20" type:"string"`
2054
2055 // The unique identifier of the calling entity. The exact value depends on the
2056 // type of entity making the call. The values returned are those listed in the
2057 // aws:userid column in the Principal table (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_variables.html#principaltable)
2058 // found on the Policy Variables reference page in the IAM User Guide.
2059 UserId *string `type:"string"`
2060}
2061
2062// String returns the string representation
2063func (s GetCallerIdentityOutput) String() string {
2064 return awsutil.Prettify(s)
2065}
2066
2067// GoString returns the string representation
2068func (s GetCallerIdentityOutput) GoString() string {
2069 return s.String()
2070}
2071
2072// SetAccount sets the Account field's value.
2073func (s *GetCallerIdentityOutput) SetAccount(v string) *GetCallerIdentityOutput {
2074 s.Account = &v
2075 return s
2076}
2077
2078// SetArn sets the Arn field's value.
2079func (s *GetCallerIdentityOutput) SetArn(v string) *GetCallerIdentityOutput {
2080 s.Arn = &v
2081 return s
2082}
2083
2084// SetUserId sets the UserId field's value.
2085func (s *GetCallerIdentityOutput) SetUserId(v string) *GetCallerIdentityOutput {
2086 s.UserId = &v
2087 return s
2088}
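// Editor's note: a hedged sketch. GetCallerIdentity takes no parameters, so
// it is a cheap way to confirm which account and principal the current
// credentials resolve to; svc is assumed to be an *STS client.
//
//	out, err := svc.GetCallerIdentity(&GetCallerIdentityInput{})
//	if err == nil {
//		fmt.Println(aws.StringValue(out.Account), aws.StringValue(out.Arn))
//	}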
2089
2090// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationTokenRequest
2091type GetFederationTokenInput struct {
2092 _ struct{} `type:"structure"`
2093
2094 // The duration, in seconds, that the session should last. Acceptable durations
2095 // for federation sessions range from 900 seconds (15 minutes) to 129600 seconds
2096 // (36 hours), with 43200 seconds (12 hours) as the default. Sessions obtained
2097 // using AWS account (root) credentials are restricted to a maximum of 3600
2098 // seconds (one hour). If the specified duration is longer than one hour, the
2099 // session obtained by using AWS account (root) credentials defaults to one
2100 // hour.
2101 DurationSeconds *int64 `min:"900" type:"integer"`
2102
2103 // The name of the federated user. The name is used as an identifier for the
2104 // temporary security credentials (such as Bob). For example, you can reference
2105 // the federated user name in a resource-based policy, such as in an Amazon
2106 // S3 bucket policy.
2107 //
2108 // The regex used to validate this parameter is a string of characters consisting
2109 // of upper- and lower-case alphanumeric characters with no spaces. You can
2110 // also include underscores or any of the following characters: =,.@-
2111 //
2112 // Name is a required field
2113 Name *string `min:"2" type:"string" required:"true"`
2114
2115 // An IAM policy in JSON format that is passed with the GetFederationToken call
2116 // and evaluated along with the policy or policies that are attached to the
2117 // IAM user whose credentials are used to call GetFederationToken. The passed
2118 // policy is used to scope down the permissions that are available to the IAM
2119 // user, by allowing only a subset of the permissions that are granted to the
2120 // IAM user. The passed policy cannot grant more permissions than those granted
2121 // to the IAM user. The final permissions for the federated user are the most
2122 // restrictive set based on the intersection of the passed policy and the IAM
2123 // user policy.
2124 //
2125 // If you do not pass a policy, the resulting temporary security credentials
2126 // have no effective permissions. The only exception is when the temporary security
2127 // credentials are used to access a resource that has a resource-based policy
2128 // that specifically allows the federated user to access the resource.
2129 //
2130 // The format for this parameter, as described by its regex pattern, is a string
2131 // of characters up to 2048 characters in length. The characters can be any
2132 // ASCII character from the space character to the end of the valid character
2133 // list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A),
2134 // and carriage return (\u000D) characters.
2135 //
2136 // The policy plain text must be 2048 bytes or shorter. However, an internal
2137 // conversion compresses it into a packed binary format with a separate limit.
2138 // The PackedPolicySize response element indicates by percentage how close to
2139 // the upper size limit the policy is, with 100% equaling the maximum allowed
2140 // size.
2141 //
2142 // For more information about how permissions work, see Permissions for GetFederationToken
2143 // (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_getfederationtoken.html).
2144 Policy *string `min:"1" type:"string"`
2145}
2146
2147// String returns the string representation
2148func (s GetFederationTokenInput) String() string {
2149 return awsutil.Prettify(s)
2150}
2151
2152// GoString returns the string representation
2153func (s GetFederationTokenInput) GoString() string {
2154 return s.String()
2155}
2156
2157// Validate inspects the fields of the type to determine if they are valid.
2158func (s *GetFederationTokenInput) Validate() error {
2159 invalidParams := request.ErrInvalidParams{Context: "GetFederationTokenInput"}
2160 if s.DurationSeconds != nil && *s.DurationSeconds < 900 {
2161 invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900))
2162 }
2163 if s.Name == nil {
2164 invalidParams.Add(request.NewErrParamRequired("Name"))
2165 }
2166 if s.Name != nil && len(*s.Name) < 2 {
2167 invalidParams.Add(request.NewErrParamMinLen("Name", 2))
2168 }
2169 if s.Policy != nil && len(*s.Policy) < 1 {
2170 invalidParams.Add(request.NewErrParamMinLen("Policy", 1))
2171 }
2172
2173 if invalidParams.Len() > 0 {
2174 return invalidParams
2175 }
2176 return nil
2177}
2178
2179// SetDurationSeconds sets the DurationSeconds field's value.
2180func (s *GetFederationTokenInput) SetDurationSeconds(v int64) *GetFederationTokenInput {
2181 s.DurationSeconds = &v
2182 return s
2183}
2184
2185// SetName sets the Name field's value.
2186func (s *GetFederationTokenInput) SetName(v string) *GetFederationTokenInput {
2187 s.Name = &v
2188 return s
2189}
2190
2191// SetPolicy sets the Policy field's value.
2192func (s *GetFederationTokenInput) SetPolicy(v string) *GetFederationTokenInput {
2193 s.Policy = &v
2194 return s
2195}
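// Editor's note: a hedged sketch of a GetFederationTokenInput that scopes the
// federated user with an inline policy. The name is a placeholder and
// policyJSON is assumed to hold a valid IAM policy document.
//
//	input := (&GetFederationTokenInput{}).
//		SetName("Bob").
//		SetPolicy(policyJSON).
//		SetDurationSeconds(3600)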
2196
2197// Contains the response to a successful GetFederationToken request, including
2198// temporary AWS credentials that can be used to make AWS requests.
2199// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationTokenResponse
2200type GetFederationTokenOutput struct {
2201 _ struct{} `type:"structure"`
2202
2203 // The temporary security credentials, which include an access key ID, a secret
2204 // access key, and a security (or session) token.
2205 //
2206 // Note: The size of the security token that STS APIs return is not fixed. We
2207 // strongly recommend that you make no assumptions about the maximum size. As
2208 // of this writing, the typical size is less than 4096 bytes, but that can vary.
2209 // Also, future updates to AWS might require larger sizes.
2210 Credentials *Credentials `type:"structure"`
2211
2212 // Identifiers for the federated user associated with the credentials (such
2213 // as arn:aws:sts::123456789012:federated-user/Bob or 123456789012:Bob). You
2214 // can use the federated user's ARN in your resource-based policies, such as
2215 // an Amazon S3 bucket policy.
2216 FederatedUser *FederatedUser `type:"structure"`
2217
2218 // A percentage value indicating the size of the policy in packed form. The
2219 // service rejects policies for which the packed size is greater than 100 percent
2220 // of the allowed value.
2221 PackedPolicySize *int64 `type:"integer"`
2222}
2223
2224// String returns the string representation
2225func (s GetFederationTokenOutput) String() string {
2226 return awsutil.Prettify(s)
2227}
2228
2229// GoString returns the string representation
2230func (s GetFederationTokenOutput) GoString() string {
2231 return s.String()
2232}
2233
2234// SetCredentials sets the Credentials field's value.
2235func (s *GetFederationTokenOutput) SetCredentials(v *Credentials) *GetFederationTokenOutput {
2236 s.Credentials = v
2237 return s
2238}
2239
2240// SetFederatedUser sets the FederatedUser field's value.
2241func (s *GetFederationTokenOutput) SetFederatedUser(v *FederatedUser) *GetFederationTokenOutput {
2242 s.FederatedUser = v
2243 return s
2244}
2245
2246// SetPackedPolicySize sets the PackedPolicySize field's value.
2247func (s *GetFederationTokenOutput) SetPackedPolicySize(v int64) *GetFederationTokenOutput {
2248 s.PackedPolicySize = &v
2249 return s
2250}
2251
2252// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionTokenRequest
2253type GetSessionTokenInput struct {
2254 _ struct{} `type:"structure"`
2255
2256 // The duration, in seconds, that the credentials should remain valid. Acceptable
2257 // durations for IAM user sessions range from 900 seconds (15 minutes) to 129600
2258 // seconds (36 hours), with 43200 seconds (12 hours) as the default. Sessions
2259 // for AWS account owners are restricted to a maximum of 3600 seconds (one hour).
2260 // If the duration is longer than one hour, the session for AWS account owners
2261 // defaults to one hour.
2262 DurationSeconds *int64 `min:"900" type:"integer"`
2263
2264 // The identification number of the MFA device that is associated with the IAM
2265 // user who is making the GetSessionToken call. Specify this value if the IAM
2266 // user has a policy that requires MFA authentication. The value is either the
2267 // serial number for a hardware device (such as GAHT12345678) or an Amazon Resource
2268 // Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user).
2269 // You can find the device for an IAM user by going to the AWS Management Console
2270 // and viewing the user's security credentials.
2271 //
2272 // The regex used to validate this parameter is a string of characters consisting
2273 // of upper- and lower-case alphanumeric characters with no spaces. You can
2274 // also include underscores or any of the following characters: =,.@-
2275 SerialNumber *string `min:"9" type:"string"`
2276
2277 // The value provided by the MFA device, if MFA is required. If any policy requires
2278 // the IAM user to submit an MFA code, specify this value. If MFA authentication
2279 // is required, and the user does not provide a code when requesting a set of
2280 // temporary security credentials, the user will receive an "access denied"
2281 // response when requesting resources that require MFA authentication.
2282 //
2283 // The format for this parameter, as described by its regex pattern, is a sequence
2284 // of six numeric digits.
2285 TokenCode *string `min:"6" type:"string"`
2286}
2287
2288// String returns the string representation
2289func (s GetSessionTokenInput) String() string {
2290 return awsutil.Prettify(s)
2291}
2292
2293// GoString returns the string representation
2294func (s GetSessionTokenInput) GoString() string {
2295 return s.String()
2296}
2297
2298// Validate inspects the fields of the type to determine if they are valid.
2299func (s *GetSessionTokenInput) Validate() error {
2300 invalidParams := request.ErrInvalidParams{Context: "GetSessionTokenInput"}
2301 if s.DurationSeconds != nil && *s.DurationSeconds < 900 {
2302 invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900))
2303 }
2304 if s.SerialNumber != nil && len(*s.SerialNumber) < 9 {
2305 invalidParams.Add(request.NewErrParamMinLen("SerialNumber", 9))
2306 }
2307 if s.TokenCode != nil && len(*s.TokenCode) < 6 {
2308 invalidParams.Add(request.NewErrParamMinLen("TokenCode", 6))
2309 }
2310
2311 if invalidParams.Len() > 0 {
2312 return invalidParams
2313 }
2314 return nil
2315}
2316
2317// SetDurationSeconds sets the DurationSeconds field's value.
2318func (s *GetSessionTokenInput) SetDurationSeconds(v int64) *GetSessionTokenInput {
2319 s.DurationSeconds = &v
2320 return s
2321}
2322
2323// SetSerialNumber sets the SerialNumber field's value.
2324func (s *GetSessionTokenInput) SetSerialNumber(v string) *GetSessionTokenInput {
2325 s.SerialNumber = &v
2326 return s
2327}
2328
2329// SetTokenCode sets the TokenCode field's value.
2330func (s *GetSessionTokenInput) SetTokenCode(v string) *GetSessionTokenInput {
2331 s.TokenCode = &v
2332 return s
2333}
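// Editor's note: a hedged sketch of requesting a session token with MFA. The
// serial number and code are placeholders in the formats documented above.
//
//	input := (&GetSessionTokenInput{}).
//		SetSerialNumber("arn:aws:iam::123456789012:mfa/user").
//		SetTokenCode("123456").
//		SetDurationSeconds(3600)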
2334
2335// Contains the response to a successful GetSessionToken request, including
2336// temporary AWS credentials that can be used to make AWS requests.
2337// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionTokenResponse
2338type GetSessionTokenOutput struct {
2339 _ struct{} `type:"structure"`
2340
2341 // The temporary security credentials, which include an access key ID, a secret
2342 // access key, and a security (or session) token.
2343 //
2344 // Note: The size of the security token that STS APIs return is not fixed. We
2345 // strongly recommend that you make no assumptions about the maximum size. As
2346 // of this writing, the typical size is less than 4096 bytes, but that can vary.
2347 // Also, future updates to AWS might require larger sizes.
2348 Credentials *Credentials `type:"structure"`
2349}
2350
2351// String returns the string representation
2352func (s GetSessionTokenOutput) String() string {
2353 return awsutil.Prettify(s)
2354}
2355
2356// GoString returns the string representation
2357func (s GetSessionTokenOutput) GoString() string {
2358 return s.String()
2359}
2360
2361// SetCredentials sets the Credentials field's value.
2362func (s *GetSessionTokenOutput) SetCredentials(v *Credentials) *GetSessionTokenOutput {
2363 s.Credentials = v
2364 return s
2365}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go
new file mode 100644
index 0000000..4010cc7
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go
@@ -0,0 +1,12 @@
1package sts
2
3import "github.com/aws/aws-sdk-go/aws/request"
4
5func init() {
6 initRequest = func(r *request.Request) {
7 switch r.Operation.Name {
8 case opAssumeRoleWithSAML, opAssumeRoleWithWebIdentity:
9 r.Handlers.Sign.Clear() // these operations are unsigned
10 }
11 }
12}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go b/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go
new file mode 100644
index 0000000..d2af518
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go
@@ -0,0 +1,124 @@
1// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
2
3// Package sts provides the client and types for making API
4// requests to AWS Security Token Service.
5//
6// The AWS Security Token Service (STS) is a web service that enables you to
7// request temporary, limited-privilege credentials for AWS Identity and Access
8// Management (IAM) users or for users that you authenticate (federated users).
9// This guide provides descriptions of the STS API. For more detailed information
10// about using this service, go to Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html).
11//
12// As an alternative to using the API, you can use one of the AWS SDKs, which
13// consist of libraries and sample code for various programming languages and
14// platforms (Java, Ruby, .NET, iOS, Android, etc.). The SDKs provide a convenient
15// way to create programmatic access to STS. For example, the SDKs take care
16// of cryptographically signing requests, managing errors, and retrying requests
17// automatically. For information about the AWS SDKs, including how to download
18// and install them, see the Tools for Amazon Web Services page (http://aws.amazon.com/tools/).
19//
20// For information about setting up signatures and authorization through the
21// API, go to Signing AWS API Requests (http://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html)
22// in the AWS General Reference. For general information about the Query API,
23// go to Making Query Requests (http://docs.aws.amazon.com/IAM/latest/UserGuide/IAM_UsingQueryAPI.html)
24// in Using IAM. For information about using security tokens with other AWS
25// products, go to AWS Services That Work with IAM (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_aws-services-that-work-with-iam.html)
26// in the IAM User Guide.
27//
28// If you're new to AWS and need additional technical information about a specific
29// AWS product, you can find the product's technical documentation at http://aws.amazon.com/documentation/
30// (http://aws.amazon.com/documentation/).
31//
32// Endpoints
33//
34// The AWS Security Token Service (STS) has a default endpoint of https://sts.amazonaws.com
35// that maps to the US East (N. Virginia) region. Additional regions are available
36// and are activated by default. For more information, see Activating and Deactivating
37// AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
38// in the IAM User Guide.
39//
40// For information about STS endpoints, see Regions and Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html#sts_region)
41// in the AWS General Reference.
42//
43// Recording API requests
44//
45// STS supports AWS CloudTrail, which is a service that records AWS calls for
46// your AWS account and delivers log files to an Amazon S3 bucket. By using
47// information collected by CloudTrail, you can determine what requests were
48// successfully made to STS, who made the request, when it was made, and so
49// on. To learn more about CloudTrail, including how to turn it on and find
50// your log files, see the AWS CloudTrail User Guide (http://docs.aws.amazon.com/awscloudtrail/latest/userguide/what_is_cloud_trail_top_level.html).
51//
52// See https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15 for more information on this service.
53//
54// See sts package documentation for more information.
55// https://docs.aws.amazon.com/sdk-for-go/api/service/sts/
56//
57// Using the Client
58//
59// To use the client for AWS Security Token Service you will first need
60// to create a new instance of it.
61//
62// When creating a client for an AWS service you'll first need to have a Session
63// already created. The Session provides configuration that can be shared
64// between multiple service clients. Additional configuration can be applied to
65// the Session and service's client when they are constructed. The aws package's
66// Config type contains several fields such as Region for the AWS Region the
67// client should make API requests to. The optional Config value can be provided
68// as the variadic argument for Sessions and client creation.
69//
70// Once the service's client is created you can use it to make API requests to the
71// AWS service. These clients are safe to use concurrently.
72//
73// // Create a session to share configuration, and load external configuration.
74// sess := session.Must(session.NewSession())
75//
76// // Create the service's client with the session.
77// svc := sts.New(sess)
78//
79// See the SDK's documentation for more information on how to use service clients.
80// https://docs.aws.amazon.com/sdk-for-go/api/
81//
82// See aws package's Config type for more information on configuration options.
83// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config
84//
85// See the AWS Security Token Service client STS for more
86// information on creating the service's client.
87// https://docs.aws.amazon.com/sdk-for-go/api/service/sts/#New
88//
89// Once the client is created you can make an API request to the service.
90// Each API method takes an input parameter, and returns the service response
91// and an error.
92//
93// The API method's documentation will list the error codes the service can
94// return for the operation, if the service models the API operation's errors. These
95// errors will also be available as const strings prefixed with "ErrCode".
96//
97// result, err := svc.AssumeRole(params)
98// if err != nil {
99// // Cast err to awserr.Error to handle specific error codes.
100// aerr, ok := err.(awserr.Error)
101// if ok && aerr.Code() == <error code to check for> {
102// // Specific error code handling
103// }
104// return err
105// }
106//
107// fmt.Println("AssumeRole result:")
108// fmt.Println(result)
109//
110// Using the Client with Context
111//
112// The service's client also provides methods to make API requests with a Context
113// value. This allows you to control the timeout, and cancellation of pending
114// requests. These methods also take request Option as variadic parameter to apply
115// additional configuration to the API request.
116//
117// ctx := context.Background()
118//
119// result, err := svc.AssumeRoleWithContext(ctx, params)
120//
121// See the request package documentation for more information on using Context pattern
122// with the SDK.
123// https://docs.aws.amazon.com/sdk-for-go/api/aws/request/
124package sts
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go b/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go
new file mode 100644
index 0000000..e24884e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go
@@ -0,0 +1,73 @@
1// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
2
3package sts
4
5const (
6
7 // ErrCodeExpiredTokenException for service response error code
8 // "ExpiredTokenException".
9 //
10 // The web identity token that was passed is expired or is not valid. Get a
11 // new identity token from the identity provider and then retry the request.
12 ErrCodeExpiredTokenException = "ExpiredTokenException"
13
14 // ErrCodeIDPCommunicationErrorException for service response error code
15 // "IDPCommunicationError".
16 //
17 // The request could not be fulfilled because the non-AWS identity provider
18 // (IDP) that was asked to verify the incoming identity token could not be reached.
19 // This is often a transient error caused by network conditions. Retry the request
20 // a limited number of times so that you don't exceed the request rate. If the
21 // error persists, the non-AWS identity provider might be down or not responding.
22 ErrCodeIDPCommunicationErrorException = "IDPCommunicationError"
23
24 // ErrCodeIDPRejectedClaimException for service response error code
25 // "IDPRejectedClaim".
26 //
27 // The identity provider (IdP) reported that authentication failed. This might
28 // be because the claim is invalid.
29 //
30 // If this error is returned for the AssumeRoleWithWebIdentity operation, it
31 // can also mean that the claim has expired or has been explicitly revoked.
32 ErrCodeIDPRejectedClaimException = "IDPRejectedClaim"
33
34 // ErrCodeInvalidAuthorizationMessageException for service response error code
35 // "InvalidAuthorizationMessageException".
36 //
37 // The error returned if the message passed to DecodeAuthorizationMessage was
38 // invalid. This can happen if the token contains invalid characters, such as
39 // linebreaks.
40 ErrCodeInvalidAuthorizationMessageException = "InvalidAuthorizationMessageException"
41
42 // ErrCodeInvalidIdentityTokenException for service response error code
43 // "InvalidIdentityToken".
44 //
45 // The web identity token that was passed could not be validated by AWS. Get
46 // a new identity token from the identity provider and then retry the request.
47 ErrCodeInvalidIdentityTokenException = "InvalidIdentityToken"
48
49 // ErrCodeMalformedPolicyDocumentException for service response error code
50 // "MalformedPolicyDocument".
51 //
52 // The request was rejected because the policy document was malformed. The error
53 // message describes the specific error.
54 ErrCodeMalformedPolicyDocumentException = "MalformedPolicyDocument"
55
56 // ErrCodePackedPolicyTooLargeException for service response error code
57 // "PackedPolicyTooLarge".
58 //
59 // The request was rejected because the policy document was too large. The error
60 // message describes how big the policy document is, in packed form, as a percentage
61 // of what the API allows.
62 ErrCodePackedPolicyTooLargeException = "PackedPolicyTooLarge"
63
64 // ErrCodeRegionDisabledException for service response error code
65 // "RegionDisabledException".
66 //
67 // STS is not activated in the requested region for the account that is being
68 // asked to generate credentials. The account administrator must use the IAM
69 // console to activate STS in that region. For more information, see Activating
70 // and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
71 // in the IAM User Guide.
72 ErrCodeRegionDisabledException = "RegionDisabledException"
73)
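
For context, a minimal sketch of how these generated constants are typically matched against the awserr.Error interface; the role ARN and session name below are hypothetical placeholders, not values from this diff:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := sts.New(sess)

	// "arn:aws:iam::123456789012:role/demo" is a placeholder ARN.
	_, err := svc.AssumeRole(&sts.AssumeRoleInput{
		RoleArn:         aws.String("arn:aws:iam::123456789012:role/demo"),
		RoleSessionName: aws.String("demo-session"),
	})
	if aerr, ok := err.(awserr.Error); ok {
		switch aerr.Code() {
		case sts.ErrCodeRegionDisabledException:
			fmt.Println("activate STS for this region in the IAM console")
		case sts.ErrCodeMalformedPolicyDocumentException:
			fmt.Println("the session policy could not be parsed")
		default:
			fmt.Println(aerr.Code(), aerr.Message())
		}
	}
}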
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/service.go b/vendor/github.com/aws/aws-sdk-go/service/sts/service.go
new file mode 100644
index 0000000..1ee5839
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/sts/service.go
@@ -0,0 +1,93 @@
1// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
2
3package sts
4
5import (
6 "github.com/aws/aws-sdk-go/aws"
7 "github.com/aws/aws-sdk-go/aws/client"
8 "github.com/aws/aws-sdk-go/aws/client/metadata"
9 "github.com/aws/aws-sdk-go/aws/request"
10 "github.com/aws/aws-sdk-go/aws/signer/v4"
11 "github.com/aws/aws-sdk-go/private/protocol/query"
12)
13
14// STS provides the API operation methods for making requests to
15// AWS Security Token Service. See this package's package overview docs
16// for details on the service.
17//
18// STS methods are safe to use concurrently. It is not safe to
19// mutate any of the struct's properties though.
20type STS struct {
21 *client.Client
22}
23
24// Used for custom client initialization logic
25var initClient func(*client.Client)
26
27// Used for custom request initialization logic
28var initRequest func(*request.Request)
29
30// Service information constants
31const (
32 ServiceName = "sts" // The service endpoint prefix that API calls are made to.
33 EndpointsID = ServiceName // Service ID for Regions and Endpoints metadata.
34)
35
36// New creates a new instance of the STS client with a session.
37// If additional configuration is needed for the client instance use the optional
38// aws.Config parameter to add your extra config.
39//
40// Example:
41// // Create a STS client from just a session.
42// svc := sts.New(mySession)
43//
44// // Create a STS client with additional configuration
45// svc := sts.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
46func New(p client.ConfigProvider, cfgs ...*aws.Config) *STS {
47 c := p.ClientConfig(EndpointsID, cfgs...)
48 return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName)
49}
50
51// newClient creates, initializes and returns a new service client instance.
52func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *STS {
53 svc := &STS{
54 Client: client.New(
55 cfg,
56 metadata.ClientInfo{
57 ServiceName: ServiceName,
58 SigningName: signingName,
59 SigningRegion: signingRegion,
60 Endpoint: endpoint,
61 APIVersion: "2011-06-15",
62 },
63 handlers,
64 ),
65 }
66
67 // Handlers
68 svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
69 svc.Handlers.Build.PushBackNamed(query.BuildHandler)
70 svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler)
71 svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler)
72 svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler)
73
74 // Run custom client initialization if present
75 if initClient != nil {
76 initClient(svc.Client)
77 }
78
79 return svc
80}
81
82// newRequest creates a new request for a STS operation and runs any
83// custom request initialization.
84func (c *STS) newRequest(op *request.Operation, params, data interface{}) *request.Request {
85 req := c.NewRequest(op, params, data)
86
87 // Run custom request initialization if present
88 if initRequest != nil {
89 initRequest(req)
90 }
91
92 return req
93}
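
Combining New above with the context-aware operation methods described in doc.go, a sketch of a deadline-bounded call; it assumes the SDK's generated GetCallerIdentity operation (defined elsewhere in this package), chosen only because it needs no parameters:

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	// The session carries shared configuration; the extra aws.Config
	// overrides only the region for this client.
	sess := session.Must(session.NewSession())
	svc := sts.New(sess, aws.NewConfig().WithRegion("us-west-2"))

	// Cancel the request if it has not completed within five seconds.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	out, err := svc.GetCallerIdentityWithContext(ctx, &sts.GetCallerIdentityInput{})
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	fmt.Println("caller ARN:", aws.StringValue(out.Arn))
}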
diff --git a/vendor/github.com/bgentry/go-netrc/LICENSE b/vendor/github.com/bgentry/go-netrc/LICENSE
new file mode 100644
index 0000000..aade9a5
--- /dev/null
+++ b/vendor/github.com/bgentry/go-netrc/LICENSE
@@ -0,0 +1,20 @@
1Original version Copyright © 2010 Fazlul Shahriar <fshahriar@gmail.com>. Newer
2portions Copyright © 2014 Blake Gentry <blakesgentry@gmail.com>.
3
4Permission is hereby granted, free of charge, to any person obtaining a copy
5of this software and associated documentation files (the "Software"), to deal
6in the Software without restriction, including without limitation the rights
7to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8copies of the Software, and to permit persons to whom the Software is
9furnished to do so, subject to the following conditions:
10
11The above copyright notice and this permission notice shall be included in
12all copies or substantial portions of the Software.
13
14THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
19OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
20THE SOFTWARE.
diff --git a/vendor/github.com/bgentry/go-netrc/netrc/netrc.go b/vendor/github.com/bgentry/go-netrc/netrc/netrc.go
new file mode 100644
index 0000000..ea49987
--- /dev/null
+++ b/vendor/github.com/bgentry/go-netrc/netrc/netrc.go
@@ -0,0 +1,510 @@
1package netrc
2
3import (
4 "bufio"
5 "bytes"
6 "fmt"
7 "io"
8 "io/ioutil"
9 "os"
10 "strings"
11 "sync"
12 "unicode"
13 "unicode/utf8"
14)
15
16type tkType int
17
18const (
19 tkMachine tkType = iota
20 tkDefault
21 tkLogin
22 tkPassword
23 tkAccount
24 tkMacdef
25 tkComment
26 tkWhitespace
27)
28
29var keywords = map[string]tkType{
30 "machine": tkMachine,
31 "default": tkDefault,
32 "login": tkLogin,
33 "password": tkPassword,
34 "account": tkAccount,
35 "macdef": tkMacdef,
36 "#": tkComment,
37}
38
39type Netrc struct {
40 tokens []*token
41 machines []*Machine
42 macros Macros
43 updateLock sync.Mutex
44}
45
46// FindMachine returns the Machine in n named by name. If a machine named by
47// name exists, it is returned. If no Machine with name name is found and there
48// is a ``default'' machine, the ``default'' machine is returned. Otherwise, nil
49// is returned.
50func (n *Netrc) FindMachine(name string) (m *Machine) {
51 // TODO(bgentry): not safe for concurrency
52 var def *Machine
53 for _, m = range n.machines {
54 if m.Name == name {
55 return m
56 }
57 if m.IsDefault() {
58 def = m
59 }
60 }
61 if def == nil {
62 return nil
63 }
64 return def
65}
66
67// MarshalText implements the encoding.TextMarshaler interface to encode a
68// Netrc into text format.
69func (n *Netrc) MarshalText() (text []byte, err error) {
70 // TODO(bgentry): not safe for concurrency
71 for i := range n.tokens {
72 switch n.tokens[i].kind {
73 case tkComment, tkDefault, tkWhitespace: // always append these types
74 text = append(text, n.tokens[i].rawkind...)
75 default:
76 if n.tokens[i].value != "" { // skip empty-value tokens
77 text = append(text, n.tokens[i].rawkind...)
78 }
79 }
80 if n.tokens[i].kind == tkMacdef {
81 text = append(text, ' ')
82 text = append(text, n.tokens[i].macroName...)
83 }
84 text = append(text, n.tokens[i].rawvalue...)
85 }
86 return
87}
88
89func (n *Netrc) NewMachine(name, login, password, account string) *Machine {
90 n.updateLock.Lock()
91 defer n.updateLock.Unlock()
92
93 prefix := "\n"
94 if len(n.tokens) == 0 {
95 prefix = ""
96 }
97 m := &Machine{
98 Name: name,
99 Login: login,
100 Password: password,
101 Account: account,
102
103 nametoken: &token{
104 kind: tkMachine,
105 rawkind: []byte(prefix + "machine"),
106 value: name,
107 rawvalue: []byte(" " + name),
108 },
109 logintoken: &token{
110 kind: tkLogin,
111 rawkind: []byte("\n\tlogin"),
112 value: login,
113 rawvalue: []byte(" " + login),
114 },
115 passtoken: &token{
116 kind: tkPassword,
117 rawkind: []byte("\n\tpassword"),
118 value: password,
119 rawvalue: []byte(" " + password),
120 },
121 accounttoken: &token{
122 kind: tkAccount,
123 rawkind: []byte("\n\taccount"),
124 value: account,
125 rawvalue: []byte(" " + account),
126 },
127 }
128 n.insertMachineTokensBeforeDefault(m)
129 for i := range n.machines {
130 if n.machines[i].IsDefault() {
131 n.machines = append(n.machines[:i], append([]*Machine{m}, n.machines[i:]...)...)
132 return m
133 }
134 }
135 n.machines = append(n.machines, m)
136 return m
137}
138
139func (n *Netrc) insertMachineTokensBeforeDefault(m *Machine) {
140 newtokens := []*token{m.nametoken}
141 if m.logintoken.value != "" {
142 newtokens = append(newtokens, m.logintoken)
143 }
144 if m.passtoken.value != "" {
145 newtokens = append(newtokens, m.passtoken)
146 }
147 if m.accounttoken.value != "" {
148 newtokens = append(newtokens, m.accounttoken)
149 }
150 for i := range n.tokens {
151 if n.tokens[i].kind == tkDefault {
152 // found the default, now insert tokens before it
153 n.tokens = append(n.tokens[:i], append(newtokens, n.tokens[i:]...)...)
154 return
155 }
156 }
157 // didn't find a default, just add the newtokens to the end
158 n.tokens = append(n.tokens, newtokens...)
159 return
160}
161
162func (n *Netrc) RemoveMachine(name string) {
163 n.updateLock.Lock()
164 defer n.updateLock.Unlock()
165
166 for i := range n.machines {
167 if n.machines[i] != nil && n.machines[i].Name == name {
168 m := n.machines[i]
169 for _, t := range []*token{
170 m.nametoken, m.logintoken, m.passtoken, m.accounttoken,
171 } {
172 n.removeToken(t)
173 }
174 n.machines = append(n.machines[:i], n.machines[i+1:]...)
175 return
176 }
177 }
178}
179
180func (n *Netrc) removeToken(t *token) {
181 if t != nil {
182 for i := range n.tokens {
183 if n.tokens[i] == t {
184 n.tokens = append(n.tokens[:i], n.tokens[i+1:]...)
185 return
186 }
187 }
188 }
189}
190
191// Machine contains information about a remote machine.
192type Machine struct {
193 Name string
194 Login string
195 Password string
196 Account string
197
198 nametoken *token
199 logintoken *token
200 passtoken *token
201 accounttoken *token
202}
203
204// IsDefault returns true if the machine is a "default" token, denoted by an
205// empty name.
206func (m *Machine) IsDefault() bool {
207 return m.Name == ""
208}
209
210// UpdatePassword sets the password for the Machine m.
211func (m *Machine) UpdatePassword(newpass string) {
212 m.Password = newpass
213 updateTokenValue(m.passtoken, newpass)
214}
215
216// UpdateLogin sets the login for the Machine m.
217func (m *Machine) UpdateLogin(newlogin string) {
218 m.Login = newlogin
219 updateTokenValue(m.logintoken, newlogin)
220}
221
222// UpdateAccount sets the account for the Machine m.
223func (m *Machine) UpdateAccount(newaccount string) {
224 m.Account = newaccount
225 updateTokenValue(m.accounttoken, newaccount)
226}
227
228func updateTokenValue(t *token, value string) {
229 oldvalue := t.value
230 t.value = value
231 newraw := make([]byte, len(t.rawvalue))
232 copy(newraw, t.rawvalue)
233 t.rawvalue = append(
234 bytes.TrimSuffix(newraw, []byte(oldvalue)),
235 []byte(value)...,
236 )
237}
238
239// Macros contains all the macro definitions in a netrc file.
240type Macros map[string]string
241
242type token struct {
243 kind tkType
244 macroName string
245 value string
246 rawkind []byte
247 rawvalue []byte
248}
249
250// Error represents a netrc file parse error.
251type Error struct {
252 LineNum int // Line number
253 Msg string // Error message
254}
255
256// Error returns a string representation of error e.
257func (e *Error) Error() string {
258 return fmt.Sprintf("line %d: %s", e.LineNum, e.Msg)
259}
260
261func (e *Error) BadDefaultOrder() bool {
262 return e.Msg == errBadDefaultOrder
263}
264
265const errBadDefaultOrder = "default token must appear after all machine tokens"
266
267// scanLinesKeepPrefix is a split function for a Scanner that returns each line
268// of text. The returned token may include newlines if they are before the
269// first non-space character. The returned line may be empty. The end-of-line
270// marker is one optional carriage return followed by one mandatory newline. In
271// regular expression notation, it is `\r?\n`. The last non-empty line of
272// input will be returned even if it has no newline.
273func scanLinesKeepPrefix(data []byte, atEOF bool) (advance int, token []byte, err error) {
274 if atEOF && len(data) == 0 {
275 return 0, nil, nil
276 }
277 // Skip leading spaces.
278 start := 0
279 for width := 0; start < len(data); start += width {
280 var r rune
281 r, width = utf8.DecodeRune(data[start:])
282 if !unicode.IsSpace(r) {
283 break
284 }
285 }
286 if i := bytes.IndexByte(data[start:], '\n'); i >= 0 {
287 // We have a full newline-terminated line.
288 return start + i, data[0 : start+i], nil
289 }
290 // If we're at EOF, we have a final, non-terminated line. Return it.
291 if atEOF {
292 return len(data), data, nil
293 }
294 // Request more data.
295 return 0, nil, nil
296}
297
298// scanTokensKeepPrefix is a split function for a Scanner that returns each
299// space-separated word of text, with prefixing spaces included. It will never
300// return an empty string. The definition of space is set by unicode.IsSpace.
301//
302// Adapted from bufio.ScanWords().
303func scanTokensKeepPrefix(data []byte, atEOF bool) (advance int, token []byte, err error) {
304 // Skip leading spaces.
305 start := 0
306 for width := 0; start < len(data); start += width {
307 var r rune
308 r, width = utf8.DecodeRune(data[start:])
309 if !unicode.IsSpace(r) {
310 break
311 }
312 }
313 if atEOF && len(data) == 0 || start == len(data) {
314 return len(data), data, nil
315 }
316 if len(data) > start && data[start] == '#' {
317 return scanLinesKeepPrefix(data, atEOF)
318 }
319 // Scan until space, marking end of word.
320 for width, i := 0, start; i < len(data); i += width {
321 var r rune
322 r, width = utf8.DecodeRune(data[i:])
323 if unicode.IsSpace(r) {
324 return i, data[:i], nil
325 }
326 }
327 // If we're at EOF, we have a final, non-empty, non-terminated word. Return it.
328 if atEOF && len(data) > start {
329 return len(data), data, nil
330 }
331 // Request more data.
332 return 0, nil, nil
333}
334
335func newToken(rawb []byte) (*token, error) {
336 _, tkind, err := bufio.ScanWords(rawb, true)
337 if err != nil {
338 return nil, err
339 }
340 var ok bool
341 t := token{rawkind: rawb}
342 t.kind, ok = keywords[string(tkind)]
343 if !ok {
344 trimmed := strings.TrimSpace(string(tkind))
345 if trimmed == "" {
346 t.kind = tkWhitespace // whitespace-only, should happen only at EOF
347 return &t, nil
348 }
349 if strings.HasPrefix(trimmed, "#") {
350 t.kind = tkComment // this is a comment
351 return &t, nil
352 }
353 return &t, fmt.Errorf("keyword expected; got %s", tkind)
354 }
355 return &t, nil
356}
357
358func scanValue(scanner *bufio.Scanner, pos int) ([]byte, string, int, error) {
359 if scanner.Scan() {
360 raw := scanner.Bytes()
361 pos += bytes.Count(raw, []byte{'\n'})
362 return raw, strings.TrimSpace(string(raw)), pos, nil
363 }
364 if err := scanner.Err(); err != nil {
365 return nil, "", pos, &Error{pos, err.Error()}
366 }
367 return nil, "", pos, nil
368}
369
370func parse(r io.Reader, pos int) (*Netrc, error) {
371 b, err := ioutil.ReadAll(r)
372 if err != nil {
373 return nil, err
374 }
375
376 nrc := Netrc{machines: make([]*Machine, 0, 20), macros: make(Macros, 10)}
377
378 defaultSeen := false
379 var currentMacro *token
380 var m *Machine
381 var t *token
382 scanner := bufio.NewScanner(bytes.NewReader(b))
383 scanner.Split(scanTokensKeepPrefix)
384
385 for scanner.Scan() {
386 rawb := scanner.Bytes()
387 if len(rawb) == 0 {
388 break
389 }
390 pos += bytes.Count(rawb, []byte{'\n'})
391 t, err = newToken(rawb)
392 if err != nil {
393 if currentMacro == nil {
394 return nil, &Error{pos, err.Error()}
395 }
396 currentMacro.rawvalue = append(currentMacro.rawvalue, rawb...)
397 continue
398 }
399
400 if currentMacro != nil && bytes.Contains(rawb, []byte{'\n', '\n'}) {
401 // if macro rawvalue + rawb would contain \n\n, then macro def is over
402 currentMacro.value = strings.TrimLeft(string(currentMacro.rawvalue), "\r\n")
403 nrc.macros[currentMacro.macroName] = currentMacro.value
404 currentMacro = nil
405 }
406
407 switch t.kind {
408 case tkMacdef:
409 if _, t.macroName, pos, err = scanValue(scanner, pos); err != nil {
410 return nil, &Error{pos, err.Error()}
411 }
412 currentMacro = t
413 case tkDefault:
414 if defaultSeen {
415 return nil, &Error{pos, "multiple default tokens"}
416 }
417 if m != nil {
418 nrc.machines, m = append(nrc.machines, m), nil
419 }
420 m = new(Machine)
421 m.Name = ""
422 defaultSeen = true
423 case tkMachine:
424 if defaultSeen {
425 return nil, &Error{pos, errBadDefaultOrder}
426 }
427 if m != nil {
428 nrc.machines, m = append(nrc.machines, m), nil
429 }
430 m = new(Machine)
431 if t.rawvalue, m.Name, pos, err = scanValue(scanner, pos); err != nil {
432 return nil, &Error{pos, err.Error()}
433 }
434 t.value = m.Name
435 m.nametoken = t
436 case tkLogin:
437 if m == nil || m.Login != "" {
438 return nil, &Error{pos, "unexpected token login"}
439 }
440 if t.rawvalue, m.Login, pos, err = scanValue(scanner, pos); err != nil {
441 return nil, &Error{pos, err.Error()}
442 }
443 t.value = m.Login
444 m.logintoken = t
445 case tkPassword:
446 if m == nil || m.Password != "" {
447 return nil, &Error{pos, "unexpected token password"}
448 }
449 if t.rawvalue, m.Password, pos, err = scanValue(scanner, pos); err != nil {
450 return nil, &Error{pos, err.Error()}
451 }
452 t.value = m.Password
453 m.passtoken = t
454 case tkAccount:
455 if m == nil || m.Account != "" {
456 return nil, &Error{pos, "unexpected token account"}
457 }
458 if t.rawvalue, m.Account, pos, err = scanValue(scanner, pos); err != nil {
459 return nil, &Error{pos, err.Error()}
460 }
461 t.value = m.Account
462 m.accounttoken = t
463 }
464
465 nrc.tokens = append(nrc.tokens, t)
466 }
467
468 if err := scanner.Err(); err != nil {
469 return nil, err
470 }
471
472 if m != nil {
473 nrc.machines, m = append(nrc.machines, m), nil
474 }
475 return &nrc, nil
476}
477
478// ParseFile opens the file at filename and then passes its io.Reader to
479// Parse().
480func ParseFile(filename string) (*Netrc, error) {
481 fd, err := os.Open(filename)
482 if err != nil {
483 return nil, err
484 }
485 defer fd.Close()
486 return Parse(fd)
487}
488
489// Parse parses the Reader r as a netrc file and returns the set of
490// machine information and macros defined in it. The ``default'' machine,
491// which is intended to be used when no machine name matches, is identified
492// by an empty machine name. There can be only one ``default'' machine.
493//
494// If there is a parsing error, an Error is returned.
495func Parse(r io.Reader) (*Netrc, error) {
496 return parse(r, 1)
497}
498
499// FindMachine parses the netrc file identified by filename and returns the
500// Machine named by name. If a problem occurs parsing the file at filename, an
501// error is returned. If a machine named by name exists, it is returned. If no
502// Machine with name name is found and there is a ``default'' machine, the
503// ``default'' machine is returned. Otherwise, nil is returned.
504func FindMachine(filename, name string) (m *Machine, err error) {
505 n, err := ParseFile(filename)
506 if err != nil {
507 return nil, err
508 }
509 return n.FindMachine(name), nil
510}
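
A sketch of the parse / update / re-encode round trip these types support, using only the exported API shown above; the path and credentials are hypothetical:

package main

import (
	"fmt"
	"io/ioutil"

	"github.com/bgentry/go-netrc/netrc"
)

func main() {
	// The path is a placeholder; ParseFile reports an *netrc.Error on
	// malformed input.
	n, err := netrc.ParseFile("/home/user/.netrc")
	if err != nil {
		fmt.Println("parse error:", err)
		return
	}

	// FindMachine falls back to the "default" machine when no name matches.
	m := n.FindMachine("api.example.com")
	if m == nil {
		m = n.NewMachine("api.example.com", "user", "s3cret", "")
	} else {
		m.UpdatePassword("s3cret")
	}

	// MarshalText preserves the original tokens, comments, and whitespace.
	text, err := n.MarshalText()
	if err != nil {
		fmt.Println("encode error:", err)
		return
	}
	if err := ioutil.WriteFile("/home/user/.netrc", text, 0600); err != nil {
		fmt.Println("write error:", err)
	}
}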
diff --git a/vendor/github.com/davecgh/go-spew/LICENSE b/vendor/github.com/davecgh/go-spew/LICENSE
new file mode 100644
index 0000000..c836416
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/LICENSE
@@ -0,0 +1,15 @@
1ISC License
2
3Copyright (c) 2012-2016 Dave Collins <dave@davec.name>
4
5Permission to use, copy, modify, and distribute this software for any
6purpose with or without fee is hereby granted, provided that the above
7copyright notice and this permission notice appear in all copies.
8
9THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
diff --git a/vendor/github.com/davecgh/go-spew/spew/bypass.go b/vendor/github.com/davecgh/go-spew/spew/bypass.go
new file mode 100644
index 0000000..8a4a658
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/bypass.go
@@ -0,0 +1,152 @@
1// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
2//
3// Permission to use, copy, modify, and distribute this software for any
4// purpose with or without fee is hereby granted, provided that the above
5// copyright notice and this permission notice appear in all copies.
6//
7// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
8// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
9// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
10// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
11// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
12// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
13// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
14
15// NOTE: Due to the following build constraints, this file will only be compiled
16// when the code is not running on Google App Engine, compiled by GopherJS, and
17// "-tags safe" is not added to the go build command line. The "disableunsafe"
18// tag is deprecated and thus should not be used.
19// +build !js,!appengine,!safe,!disableunsafe
20
21package spew
22
23import (
24 "reflect"
25 "unsafe"
26)
27
28const (
29 // UnsafeDisabled is a build-time constant which specifies whether or
30 // not access to the unsafe package is available.
31 UnsafeDisabled = false
32
33 // ptrSize is the size of a pointer on the current arch.
34 ptrSize = unsafe.Sizeof((*byte)(nil))
35)
36
37var (
38 // offsetPtr, offsetScalar, and offsetFlag are the offsets for the
39 // internal reflect.Value fields. These values are valid before golang
40// commit ecccf07e7f9d which changed the format. They are also valid
41 // after commit 82f48826c6c7 which changed the format again to mirror
42 // the original format. Code in the init function updates these offsets
43 // as necessary.
44 offsetPtr = uintptr(ptrSize)
45 offsetScalar = uintptr(0)
46 offsetFlag = uintptr(ptrSize * 2)
47
48 // flagKindWidth and flagKindShift indicate various bits that the
49 // reflect package uses internally to track kind information.
50 //
51 // flagRO indicates whether or not the value field of a reflect.Value is
52 // read-only.
53 //
54 // flagIndir indicates whether the value field of a reflect.Value is
55 // the actual data or a pointer to the data.
56 //
57 // These values are valid before golang commit 90a7c3c86944 which
58 // changed their positions. Code in the init function updates these
59 // flags as necessary.
60 flagKindWidth = uintptr(5)
61 flagKindShift = uintptr(flagKindWidth - 1)
62 flagRO = uintptr(1 << 0)
63 flagIndir = uintptr(1 << 1)
64)
65
66func init() {
67 // Older versions of reflect.Value stored small integers directly in the
68 // ptr field (which is named val in the older versions). Versions
69 // between commits ecccf07e7f9d and 82f48826c6c7 added a new field named
70 // scalar for this purpose which unfortunately came before the flag
71 // field, so the offset of the flag field is different for those
72 // versions.
73 //
74 // This code constructs a new reflect.Value from a known small integer
75 // and checks if the size of the reflect.Value struct indicates it has
76 // the scalar field. When it does, the offsets are updated accordingly.
77 vv := reflect.ValueOf(0xf00)
78 if unsafe.Sizeof(vv) == (ptrSize * 4) {
79 offsetScalar = ptrSize * 2
80 offsetFlag = ptrSize * 3
81 }
82
83 // Commit 90a7c3c86944 changed the flag positions such that the low
84 // order bits are the kind. This code extracts the kind from the flags
85 // field and ensures it's the correct type. When it's not, the flag
86 // order has been changed to the newer format, so the flags are updated
87 // accordingly.
88 upf := unsafe.Pointer(uintptr(unsafe.Pointer(&vv)) + offsetFlag)
89 upfv := *(*uintptr)(upf)
90 flagKindMask := uintptr((1<<flagKindWidth - 1) << flagKindShift)
91 if (upfv&flagKindMask)>>flagKindShift != uintptr(reflect.Int) {
92 flagKindShift = 0
93 flagRO = 1 << 5
94 flagIndir = 1 << 6
95
96 // Commit adf9b30e5594 modified the flags to separate the
97 // flagRO flag into two bits which specifies whether or not the
98 // field is embedded. This causes flagIndir to move over a bit
99 // and means that flagRO is the combination of either of the
100 // original flagRO bit and the new bit.
101 //
102 // This code detects the change by extracting what used to be
103 // the indirect bit to ensure it's set. When it's not, the flag
104 // order has been changed to the newer format, so the flags are
105 // updated accordingly.
106 if upfv&flagIndir == 0 {
107 flagRO = 3 << 5
108 flagIndir = 1 << 7
109 }
110 }
111}
112
113// unsafeReflectValue converts the passed reflect.Value into one that bypasses
114// the typical safety restrictions preventing access to unaddressable and
115// unexported data. It works by digging the raw pointer to the underlying
116// value out of the protected value and generating a new unprotected (unsafe)
117// reflect.Value to it.
118//
119// This allows us to check for implementations of the Stringer and error
120// interfaces to be used for pretty printing ordinarily unaddressable and
121// inaccessible values such as unexported struct fields.
122func unsafeReflectValue(v reflect.Value) (rv reflect.Value) {
123 indirects := 1
124 vt := v.Type()
125 upv := unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetPtr)
126 rvf := *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetFlag))
127 if rvf&flagIndir != 0 {
128 vt = reflect.PtrTo(v.Type())
129 indirects++
130 } else if offsetScalar != 0 {
131 // The value is in the scalar field when it's not one of the
132 // reference types.
133 switch vt.Kind() {
134 case reflect.Uintptr:
135 case reflect.Chan:
136 case reflect.Func:
137 case reflect.Map:
138 case reflect.Ptr:
139 case reflect.UnsafePointer:
140 default:
141 upv = unsafe.Pointer(uintptr(unsafe.Pointer(&v)) +
142 offsetScalar)
143 }
144 }
145
146 pv := reflect.NewAt(vt, upv)
147 rv = pv
148 for i := 0; i < indirects; i++ {
149 rv = rv.Elem()
150 }
151 return rv
152}
diff --git a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
new file mode 100644
index 0000000..1fe3cf3
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
@@ -0,0 +1,38 @@
1// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
2//
3// Permission to use, copy, modify, and distribute this software for any
4// purpose with or without fee is hereby granted, provided that the above
5// copyright notice and this permission notice appear in all copies.
6//
7// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
8// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
9// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
10// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
11// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
12// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
13// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
14
15// NOTE: Due to the following build constraints, this file will only be compiled
16// when the code is running on Google App Engine, compiled by GopherJS, or
17// "-tags safe" is added to the go build command line. The "disableunsafe"
18// tag is deprecated and thus should not be used.
19// +build js appengine safe disableunsafe
20
21package spew
22
23import "reflect"
24
25const (
26 // UnsafeDisabled is a build-time constant which specifies whether or
27 // not access to the unsafe package is available.
28 UnsafeDisabled = true
29)
30
31// unsafeReflectValue typically converts the passed reflect.Value into one
32// that bypasses the typical safety restrictions preventing access to
33// unaddressable and unexported data. However, doing this relies on access to
34// the unsafe package. This is a stub version which simply returns the passed
35// reflect.Value when the unsafe package is not available.
36func unsafeReflectValue(v reflect.Value) reflect.Value {
37 return v
38}
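
bypass.go and bypasssafe.go above are mutually exclusive via build tags; a sketch of how a consumer can observe which variant was compiled in (building with `go build -tags safe`, or targeting App Engine or GopherJS, selects the stub):

package main

import (
	"fmt"

	"github.com/davecgh/go-spew/spew"
)

func main() {
	// UnsafeDisabled is true when bypasssafe.go was selected at build time,
	// e.g. via `go build -tags safe`.
	if spew.UnsafeDisabled {
		fmt.Println("unsafe bypass unavailable; unexported fields may not be fully introspected")
	} else {
		fmt.Println("unsafe bypass active")
	}
}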
diff --git a/vendor/github.com/davecgh/go-spew/spew/common.go b/vendor/github.com/davecgh/go-spew/spew/common.go
new file mode 100644
index 0000000..7c519ff
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/common.go
@@ -0,0 +1,341 @@
1/*
2 * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
3 *
4 * Permission to use, copy, modify, and distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17package spew
18
19import (
20 "bytes"
21 "fmt"
22 "io"
23 "reflect"
24 "sort"
25 "strconv"
26)
27
28// Some constants in the form of bytes to avoid string overhead. This mirrors
29// the technique used in the fmt package.
30var (
31 panicBytes = []byte("(PANIC=")
32 plusBytes = []byte("+")
33 iBytes = []byte("i")
34 trueBytes = []byte("true")
35 falseBytes = []byte("false")
36 interfaceBytes = []byte("(interface {})")
37 commaNewlineBytes = []byte(",\n")
38 newlineBytes = []byte("\n")
39 openBraceBytes = []byte("{")
40 openBraceNewlineBytes = []byte("{\n")
41 closeBraceBytes = []byte("}")
42 asteriskBytes = []byte("*")
43 colonBytes = []byte(":")
44 colonSpaceBytes = []byte(": ")
45 openParenBytes = []byte("(")
46 closeParenBytes = []byte(")")
47 spaceBytes = []byte(" ")
48 pointerChainBytes = []byte("->")
49 nilAngleBytes = []byte("<nil>")
50 maxNewlineBytes = []byte("<max depth reached>\n")
51 maxShortBytes = []byte("<max>")
52 circularBytes = []byte("<already shown>")
53 circularShortBytes = []byte("<shown>")
54 invalidAngleBytes = []byte("<invalid>")
55 openBracketBytes = []byte("[")
56 closeBracketBytes = []byte("]")
57 percentBytes = []byte("%")
58 precisionBytes = []byte(".")
59 openAngleBytes = []byte("<")
60 closeAngleBytes = []byte(">")
61 openMapBytes = []byte("map[")
62 closeMapBytes = []byte("]")
63 lenEqualsBytes = []byte("len=")
64 capEqualsBytes = []byte("cap=")
65)
66
67// hexDigits is used to map a decimal value to a hex digit.
68var hexDigits = "0123456789abcdef"
69
70// catchPanic handles any panics that might occur during the handleMethods
71// calls.
72func catchPanic(w io.Writer, v reflect.Value) {
73 if err := recover(); err != nil {
74 w.Write(panicBytes)
75 fmt.Fprintf(w, "%v", err)
76 w.Write(closeParenBytes)
77 }
78}
79
80// handleMethods attempts to call the Error and String methods on the underlying
81// type the passed reflect.Value represents and outputs the result to Writer w.
82//
83// It handles panics in any called methods by catching and displaying the error
84// as the formatted value.
85func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) {
86 // We need an interface to check if the type implements the error or
87 // Stringer interface. However, the reflect package won't give us an
88 // interface on certain things like unexported struct fields in order
89 // to enforce visibility rules. We use unsafe, when it's available,
90 // to bypass these restrictions since this package does not mutate the
91 // values.
92 if !v.CanInterface() {
93 if UnsafeDisabled {
94 return false
95 }
96
97 v = unsafeReflectValue(v)
98 }
99
100 // Choose whether or not to do error and Stringer interface lookups against
101 // the base type or a pointer to the base type depending on settings.
102 // Technically calling one of these methods with a pointer receiver can
103// mutate the value; however, types which choose to satisfy an error or
104 // Stringer interface with a pointer receiver should not be mutating their
105 // state inside these interface methods.
106 if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() {
107 v = unsafeReflectValue(v)
108 }
109 if v.CanAddr() {
110 v = v.Addr()
111 }
112
113 // Is it an error or Stringer?
114 switch iface := v.Interface().(type) {
115 case error:
116 defer catchPanic(w, v)
117 if cs.ContinueOnMethod {
118 w.Write(openParenBytes)
119 w.Write([]byte(iface.Error()))
120 w.Write(closeParenBytes)
121 w.Write(spaceBytes)
122 return false
123 }
124
125 w.Write([]byte(iface.Error()))
126 return true
127
128 case fmt.Stringer:
129 defer catchPanic(w, v)
130 if cs.ContinueOnMethod {
131 w.Write(openParenBytes)
132 w.Write([]byte(iface.String()))
133 w.Write(closeParenBytes)
134 w.Write(spaceBytes)
135 return false
136 }
137 w.Write([]byte(iface.String()))
138 return true
139 }
140 return false
141}
142
143// printBool outputs a boolean value as true or false to Writer w.
144func printBool(w io.Writer, val bool) {
145 if val {
146 w.Write(trueBytes)
147 } else {
148 w.Write(falseBytes)
149 }
150}
151
152// printInt outputs a signed integer value to Writer w.
153func printInt(w io.Writer, val int64, base int) {
154 w.Write([]byte(strconv.FormatInt(val, base)))
155}
156
157// printUint outputs an unsigned integer value to Writer w.
158func printUint(w io.Writer, val uint64, base int) {
159 w.Write([]byte(strconv.FormatUint(val, base)))
160}
161
162// printFloat outputs a floating point value using the specified precision,
163// which is expected to be 32 or 64bit, to Writer w.
164func printFloat(w io.Writer, val float64, precision int) {
165 w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision)))
166}
167
168// printComplex outputs a complex value using the specified float precision
169// for the real and imaginary parts to Writer w.
170func printComplex(w io.Writer, c complex128, floatPrecision int) {
171 r := real(c)
172 w.Write(openParenBytes)
173 w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision)))
174 i := imag(c)
175 if i >= 0 {
176 w.Write(plusBytes)
177 }
178 w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision)))
179 w.Write(iBytes)
180 w.Write(closeParenBytes)
181}
182
183// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x'
184// prefix to Writer w.
185func printHexPtr(w io.Writer, p uintptr) {
186 // Null pointer.
187 num := uint64(p)
188 if num == 0 {
189 w.Write(nilAngleBytes)
190 return
191 }
192
193 // Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix
194 buf := make([]byte, 18)
195
196 // It's simpler to construct the hex string right to left.
197 base := uint64(16)
198 i := len(buf) - 1
199 for num >= base {
200 buf[i] = hexDigits[num%base]
201 num /= base
202 i--
203 }
204 buf[i] = hexDigits[num]
205
206 // Add '0x' prefix.
207 i--
208 buf[i] = 'x'
209 i--
210 buf[i] = '0'
211
212 // Strip unused leading bytes.
213 buf = buf[i:]
214 w.Write(buf)
215}
216
217// valuesSorter implements sort.Interface to allow a slice of reflect.Value
218// elements to be sorted.
219type valuesSorter struct {
220 values []reflect.Value
221 strings []string // either nil or same len as values
222 cs *ConfigState
223}
224
225// newValuesSorter initializes a valuesSorter instance, which holds a set of
226// surrogate keys on which the data should be sorted. It uses flags in
227// ConfigState to decide if and how to populate those surrogate keys.
228func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface {
229 vs := &valuesSorter{values: values, cs: cs}
230 if canSortSimply(vs.values[0].Kind()) {
231 return vs
232 }
233 if !cs.DisableMethods {
234 vs.strings = make([]string, len(values))
235 for i := range vs.values {
236 b := bytes.Buffer{}
237 if !handleMethods(cs, &b, vs.values[i]) {
238 vs.strings = nil
239 break
240 }
241 vs.strings[i] = b.String()
242 }
243 }
244 if vs.strings == nil && cs.SpewKeys {
245 vs.strings = make([]string, len(values))
246 for i := range vs.values {
247 vs.strings[i] = Sprintf("%#v", vs.values[i].Interface())
248 }
249 }
250 return vs
251}
252
253// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted
254// directly, or whether it should be considered for sorting by surrogate keys
255// (if the ConfigState allows it).
256func canSortSimply(kind reflect.Kind) bool {
257 // This switch parallels valueSortLess, except for the default case.
258 switch kind {
259 case reflect.Bool:
260 return true
261 case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
262 return true
263 case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
264 return true
265 case reflect.Float32, reflect.Float64:
266 return true
267 case reflect.String:
268 return true
269 case reflect.Uintptr:
270 return true
271 case reflect.Array:
272 return true
273 }
274 return false
275}
276
277// Len returns the number of values in the slice. It is part of the
278// sort.Interface implementation.
279func (s *valuesSorter) Len() int {
280 return len(s.values)
281}
282
283// Swap swaps the values at the passed indices. It is part of the
284// sort.Interface implementation.
285func (s *valuesSorter) Swap(i, j int) {
286 s.values[i], s.values[j] = s.values[j], s.values[i]
287 if s.strings != nil {
288 s.strings[i], s.strings[j] = s.strings[j], s.strings[i]
289 }
290}
291
292// valueSortLess returns whether the first value should sort before the second
293// value. It is used by valuesSorter.Less as part of the sort.Interface
294// implementation.
295func valueSortLess(a, b reflect.Value) bool {
296 switch a.Kind() {
297 case reflect.Bool:
298 return !a.Bool() && b.Bool()
299 case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
300 return a.Int() < b.Int()
301 case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
302 return a.Uint() < b.Uint()
303 case reflect.Float32, reflect.Float64:
304 return a.Float() < b.Float()
305 case reflect.String:
306 return a.String() < b.String()
307 case reflect.Uintptr:
308 return a.Uint() < b.Uint()
309 case reflect.Array:
310 // Compare the contents of both arrays.
311 l := a.Len()
312 for i := 0; i < l; i++ {
313 av := a.Index(i)
314 bv := b.Index(i)
315 if av.Interface() == bv.Interface() {
316 continue
317 }
318 return valueSortLess(av, bv)
319 }
320 }
321 return a.String() < b.String()
322}
323
324// Less returns whether the value at index i should sort before the
325// value at index j. It is part of the sort.Interface implementation.
326func (s *valuesSorter) Less(i, j int) bool {
327 if s.strings == nil {
328 return valueSortLess(s.values[i], s.values[j])
329 }
330 return s.strings[i] < s.strings[j]
331}
332
333// sortValues is a sort function that handles both native types and any type that
334// can be converted to error or Stringer. Other inputs are sorted according to
335// their Value.String() value to ensure display stability.
336func sortValues(values []reflect.Value, cs *ConfigState) {
337 if len(values) == 0 {
338 return
339 }
340 sort.Sort(newValuesSorter(values, cs))
341}
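
handleMethods above is the piece that honors custom Stringer implementations, including pointer receivers on non-pointer values; a sketch of the observable effect, assuming the package's top-level Dump helper and using an invented temp type for illustration:

package main

import (
	"fmt"

	"github.com/davecgh/go-spew/spew"
)

// temp satisfies fmt.Stringer only via a pointer receiver.
type temp struct{ celsius float64 }

func (t *temp) String() string { return fmt.Sprintf("%.1f C", t.celsius) }

func main() {
	t := temp{21.5}

	// Dump invokes the pointer-receiver String method even though t is not
	// a pointer, using the unsafe bypass when it is available.
	spew.Dump(t)

	// With methods disabled, the raw struct field is printed instead.
	cfg := spew.ConfigState{Indent: " ", DisableMethods: true}
	cfg.Dump(t)
}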
diff --git a/vendor/github.com/davecgh/go-spew/spew/config.go b/vendor/github.com/davecgh/go-spew/spew/config.go
new file mode 100644
index 0000000..2e3d22f
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/config.go
@@ -0,0 +1,306 @@
1/*
2 * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
3 *
4 * Permission to use, copy, modify, and distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17package spew
18
19import (
20 "bytes"
21 "fmt"
22 "io"
23 "os"
24)
25
26// ConfigState houses the configuration options used by spew to format and
27// display values. There is a global instance, Config, that is used to control
28// all top-level Formatter and Dump functionality. Each ConfigState instance
29// provides methods equivalent to the top-level functions.
30//
31// The zero value for ConfigState provides no indentation. You would typically
32// want to set it to a space or a tab.
33//
34// Alternatively, you can use NewDefaultConfig to get a ConfigState instance
35// with default settings. See the documentation of NewDefaultConfig for default
36// values.
37type ConfigState struct {
38 // Indent specifies the string to use for each indentation level. The
39 // global config instance that all top-level functions use set this to a
40 // single space by default. If you would like more indentation, you might
41 // set this to a tab with "\t" or perhaps two spaces with " ".
42 Indent string
43
44 // MaxDepth controls the maximum number of levels to descend into nested
45 // data structures. The default, 0, means there is no limit.
46 //
47 // NOTE: Circular data structures are properly detected, so it is not
48 // necessary to set this value unless you specifically want to limit deeply
49 // nested data structures.
50 MaxDepth int
51
52 // DisableMethods specifies whether or not error and Stringer interfaces are
53 // invoked for types that implement them.
54 DisableMethods bool
55
56 // DisablePointerMethods specifies whether or not to check for and invoke
57 // error and Stringer interfaces on types which only accept a pointer
58 // receiver when the current type is not a pointer.
59 //
60 // NOTE: This might be an unsafe action since calling one of these methods
61 // with a pointer receiver could technically mutate the value, however,
62 // in practice, types which choose to satisfy an error or Stringer
63 // interface with a pointer receiver should not be mutating their state
64 // inside these interface methods. As a result, this option relies on
65 // access to the unsafe package, so it will not have any effect when
66 // running in environments without access to the unsafe package such as
67 // Google App Engine or with the "safe" build tag specified.
68 DisablePointerMethods bool
69
70 // DisablePointerAddresses specifies whether to disable the printing of
71 // pointer addresses. This is useful when diffing data structures in tests.
72 DisablePointerAddresses bool
73
74 // DisableCapacities specifies whether to disable the printing of capacities
75 // for arrays, slices, maps and channels. This is useful when diffing
76 // data structures in tests.
77 DisableCapacities bool
78
79 // ContinueOnMethod specifies whether or not recursion should continue once
80 // a custom error or Stringer interface is invoked. The default, false,
81 // means it will print the results of invoking the custom error or Stringer
82 // interface and return immediately instead of continuing to recurse into
83 // the internals of the data type.
84 //
85 // NOTE: This flag does not have any effect if method invocation is disabled
86 // via the DisableMethods or DisablePointerMethods options.
87 ContinueOnMethod bool
88
89 // SortKeys specifies map keys should be sorted before being printed. Use
90 // this to have a more deterministic, diffable output. Note that only
91 // native types (bool, int, uint, floats, uintptr and string) and types
92 // that support the error or Stringer interfaces (if methods are
93 // enabled) are supported, with other types sorted according to the
94 // reflect.Value.String() output which guarantees display stability.
95 SortKeys bool
96
97 // SpewKeys specifies that, as a last resort attempt, map keys should
98 // be spewed to strings and sorted by those strings. This is only
99 // considered if SortKeys is true.
100 SpewKeys bool
101}
102
103// Config is the active configuration of the top-level functions.
104// The configuration can be changed by modifying the contents of spew.Config.
105var Config = ConfigState{Indent: " "}
106
107// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
108// passed with a Formatter interface returned by c.NewFormatter. It returns
109// the formatted string as a value that satisfies error. See NewFormatter
110// for formatting details.
111//
112// This function is shorthand for the following syntax:
113//
114// fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b))
115func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) {
116 return fmt.Errorf(format, c.convertArgs(a)...)
117}
118
119// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
120// passed with a Formatter interface returned by c.NewFormatter. It returns
121// the number of bytes written and any write error encountered. See
122// NewFormatter for formatting details.
123//
124// This function is shorthand for the following syntax:
125//
126// fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b))
127func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) {
128 return fmt.Fprint(w, c.convertArgs(a)...)
129}
130
131// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
132// passed with a Formatter interface returned by c.NewFormatter. It returns
133// the number of bytes written and any write error encountered. See
134// NewFormatter for formatting details.
135//
136// This function is shorthand for the following syntax:
137//
138// fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b))
139func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
140 return fmt.Fprintf(w, format, c.convertArgs(a)...)
141}
142
143// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
144// were passed with a Formatter interface returned by c.NewFormatter. See
145// NewFormatter for formatting details.
146//
147// This function is shorthand for the following syntax:
148//
149// fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b))
150func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
151 return fmt.Fprintln(w, c.convertArgs(a)...)
152}
153
154// Print is a wrapper for fmt.Print that treats each argument as if it were
155// passed with a Formatter interface returned by c.NewFormatter. It returns
156// the number of bytes written and any write error encountered. See
157// NewFormatter for formatting details.
158//
159// This function is shorthand for the following syntax:
160//
161// fmt.Print(c.NewFormatter(a), c.NewFormatter(b))
162func (c *ConfigState) Print(a ...interface{}) (n int, err error) {
163 return fmt.Print(c.convertArgs(a)...)
164}
165
166// Printf is a wrapper for fmt.Printf that treats each argument as if it were
167// passed with a Formatter interface returned by c.NewFormatter. It returns
168// the number of bytes written and any write error encountered. See
169// NewFormatter for formatting details.
170//
171// This function is shorthand for the following syntax:
172//
173// fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b))
174func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) {
175 return fmt.Printf(format, c.convertArgs(a)...)
176}
177
178// Println is a wrapper for fmt.Println that treats each argument as if it were
179// passed with a Formatter interface returned by c.NewFormatter. It returns
180// the number of bytes written and any write error encountered. See
181// NewFormatter for formatting details.
182//
183// This function is shorthand for the following syntax:
184//
185// fmt.Println(c.NewFormatter(a), c.NewFormatter(b))
186func (c *ConfigState) Println(a ...interface{}) (n int, err error) {
187 return fmt.Println(c.convertArgs(a)...)
188}
189
190// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
191// passed with a Formatter interface returned by c.NewFormatter. It returns
192// the resulting string. See NewFormatter for formatting details.
193//
194// This function is shorthand for the following syntax:
195//
196// fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b))
197func (c *ConfigState) Sprint(a ...interface{}) string {
198 return fmt.Sprint(c.convertArgs(a)...)
199}
200
201// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
202// passed with a Formatter interface returned by c.NewFormatter. It returns
203// the resulting string. See NewFormatter for formatting details.
204//
205// This function is shorthand for the following syntax:
206//
207// fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b))
208func (c *ConfigState) Sprintf(format string, a ...interface{}) string {
209 return fmt.Sprintf(format, c.convertArgs(a)...)
210}
211
212// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
213// were passed with a Formatter interface returned by c.NewFormatter. It
214// returns the resulting string. See NewFormatter for formatting details.
215//
216// This function is shorthand for the following syntax:
217//
218// fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b))
219func (c *ConfigState) Sprintln(a ...interface{}) string {
220 return fmt.Sprintln(c.convertArgs(a)...)
221}
222
223/*
224NewFormatter returns a custom formatter that satisfies the fmt.Formatter
225interface. As a result, it integrates cleanly with standard fmt package
226printing functions. The formatter is useful for inline printing of smaller data
227types similar to the standard %v format specifier.
228
229The custom formatter only responds to the %v (most compact), %+v (adds pointer
230addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb
231combinations. Any other verbs such as %x and %q will be sent to the
232standard fmt package for formatting. In addition, the custom formatter ignores
233the width and precision arguments (however they will still work on the format
234specifiers not handled by the custom formatter).
235
236Typically this function shouldn't be called directly. It is much easier to make
237use of the custom formatter by calling one of the convenience functions such as
238c.Printf, c.Println, or c.Fprintf.
239*/
240func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter {
241 return newFormatter(c, v)
242}
243
244// Fdump formats and displays the passed arguments to io.Writer w. It formats
245// exactly the same as Dump.
246func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) {
247 fdump(c, w, a...)
248}
249
250/*
251Dump displays the passed parameters to standard out with newlines, customizable
252indentation, and additional debug information such as complete types and all
253pointer addresses used to indirect to the final value. It provides the
254following features over the built-in printing facilities provided by the fmt
255package:
256
257 * Pointers are dereferenced and followed
258 * Circular data structures are detected and handled properly
259 * Custom Stringer/error interfaces are optionally invoked, including
260 on unexported types
261 * Custom types which only implement the Stringer/error interfaces via
262 a pointer receiver are optionally invoked when passing non-pointer
263 variables
264 * Byte arrays and slices are dumped like the hexdump -C command which
265 includes offsets, byte values in hex, and ASCII output
266
267The configuration options are controlled by modifying the public members
268of c. See ConfigState for options documentation.
269
270See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
271get the formatted result as a string.
272*/
273func (c *ConfigState) Dump(a ...interface{}) {
274 fdump(c, os.Stdout, a...)
275}
276
277// Sdump returns a string with the passed arguments formatted exactly the same
278// as Dump.
279func (c *ConfigState) Sdump(a ...interface{}) string {
280 var buf bytes.Buffer
281 fdump(c, &buf, a...)
282 return buf.String()
283}
284
285// convertArgs accepts a slice of arguments and returns a slice of the same
286// length with each argument converted to a spew Formatter interface using
287// the ConfigState associated with c.
288func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) {
289 formatters = make([]interface{}, len(args))
290 for index, arg := range args {
291 formatters[index] = newFormatter(c, arg)
292 }
293 return formatters
294}
295
296// NewDefaultConfig returns a ConfigState with the following default settings.
297//
298// Indent: " "
299// MaxDepth: 0
300// DisableMethods: false
301// DisablePointerMethods: false
302// ContinueOnMethod: false
303// SortKeys: false
304func NewDefaultConfig() *ConfigState {
305 return &ConfigState{Indent: " "}
306}
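
As a rough usage sketch (assuming the vendored package is imported as `github.com/davecgh/go-spew/spew`), a caller can hold a dedicated ConfigState such as the one NewDefaultConfig returns, so that its settings stay independent of the global spew.Config:

```go
package main

import (
	"fmt"

	"github.com/davecgh/go-spew/spew"
)

func main() {
	// A dedicated ConfigState keeps these settings independent of the
	// spew.Config global; SortKeys makes map output deterministic.
	scs := spew.ConfigState{Indent: "\t", SortKeys: true}

	m := map[string]int{"b": 2, "a": 1}
	fmt.Print(scs.Sdump(m)) // keys print in sorted order
}
```

NewDefaultConfig is a convenient starting point when only a few fields need to differ from the defaults.
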
diff --git a/vendor/github.com/davecgh/go-spew/spew/doc.go b/vendor/github.com/davecgh/go-spew/spew/doc.go
new file mode 100644
index 0000000..aacaac6
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/doc.go
@@ -0,0 +1,211 @@
1/*
2 * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
3 *
4 * Permission to use, copy, modify, and distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17/*
18Package spew implements a deep pretty printer for Go data structures to aid in
19debugging.
20
21A quick overview of the additional features spew provides over the built-in
22printing facilities for Go data types is as follows:
23
24 * Pointers are dereferenced and followed
25 * Circular data structures are detected and handled properly
26 * Custom Stringer/error interfaces are optionally invoked, including
27 on unexported types
28 * Custom types which only implement the Stringer/error interfaces via
29 a pointer receiver are optionally invoked when passing non-pointer
30 variables
31 * Byte arrays and slices are dumped like the hexdump -C command which
32 includes offsets, byte values in hex, and ASCII output (only when using
33 Dump style)
34
35There are two different approaches spew allows for dumping Go data structures:
36
37 * Dump style which prints with newlines, customizable indentation,
38 and additional debug information such as types and all pointer addresses
39 used to indirect to the final value
40 * A custom Formatter interface that integrates cleanly with the standard fmt
41 package and replaces %v, %+v, %#v, and %#+v to provide inline printing
42 similar to the default %v while providing the additional functionality
43 outlined above and passing unsupported format verbs such as %x and %q
44 along to fmt
45
46Quick Start
47
48This section demonstrates how to quickly get started with spew. See the
49sections below for further details on formatting and configuration options.
50
51To dump a variable with full newlines, indentation, type, and pointer
52information use Dump, Fdump, or Sdump:
53 spew.Dump(myVar1, myVar2, ...)
54 spew.Fdump(someWriter, myVar1, myVar2, ...)
55 str := spew.Sdump(myVar1, myVar2, ...)
56
57Alternatively, if you would prefer to use format strings with a compact inline
58printing style, use the convenience wrappers Printf, Fprintf, etc with
59%v (most compact), %+v (adds pointer addresses), %#v (adds types), or
60%#+v (adds types and pointer addresses):
61 spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
62 spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
63 spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
64 spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
65
66Configuration Options
67
68Configuration of spew is handled by fields in the ConfigState type. For
69convenience, all of the top-level functions use a global state available
70via the spew.Config global.
71
72It is also possible to create a ConfigState instance that provides methods
73equivalent to the top-level functions. This allows multiple configurations
74to be used concurrently. See the ConfigState documentation for more details.
75
76The following configuration options are available:
77 * Indent
78 String to use for each indentation level for Dump functions.
79 It is a single space by default. A popular alternative is "\t".
80
81 * MaxDepth
82 Maximum number of levels to descend into nested data structures.
83 There is no limit by default.
84
85 * DisableMethods
86 Disables invocation of error and Stringer interface methods.
87 Method invocation is enabled by default.
88
89 * DisablePointerMethods
90 Disables invocation of error and Stringer interface methods on types
91 which only accept pointer receivers from non-pointer variables.
92 Pointer method invocation is enabled by default.
93
94 * DisablePointerAddresses
95 DisablePointerAddresses specifies whether to disable the printing of
96 pointer addresses. This is useful when diffing data structures in tests.
97
98 * DisableCapacities
99 DisableCapacities specifies whether to disable the printing of
100 capacities for arrays, slices, maps and channels. This is useful when
101 diffing data structures in tests.
102
103 * ContinueOnMethod
104 Enables recursion into types after invoking error and Stringer interface
105 methods. Recursion after method invocation is disabled by default.
106
107 * SortKeys
108 Specifies map keys should be sorted before being printed. Use
109 this to have a more deterministic, diffable output. Note that
110 only native types (bool, int, uint, floats, uintptr and string)
111 and types which implement error or Stringer interfaces are
112 supported, with other types sorted according to the
113 reflect.Value.String() output, which guarantees display
114 stability. Natural map order is used by default.
115
116 * SpewKeys
117 Specifies that, as a last resort attempt, map keys should be
118 spewed to strings and sorted by those strings. This is only
119 considered if SortKeys is true.
120
121Dump Usage
122
123Simply call spew.Dump with a list of variables you want to dump:
124
125 spew.Dump(myVar1, myVar2, ...)
126
127You may also call spew.Fdump if you would prefer to output to an arbitrary
128io.Writer. For example, to dump to standard error:
129
130 spew.Fdump(os.Stderr, myVar1, myVar2, ...)
131
132A third option is to call spew.Sdump to get the formatted output as a string:
133
134 str := spew.Sdump(myVar1, myVar2, ...)
135
136Sample Dump Output
137
138See the Dump example for details on the setup of the types and variables being
139shown here.
140
141 (main.Foo) {
142 unexportedField: (*main.Bar)(0xf84002e210)({
143 flag: (main.Flag) flagTwo,
144 data: (uintptr) <nil>
145 }),
146 ExportedField: (map[interface {}]interface {}) (len=1) {
147 (string) (len=3) "one": (bool) true
148 }
149 }
150
151Byte (and uint8) arrays and slices are displayed in the style of the hexdump -C
152command, as shown below.
153 ([]uint8) (len=32 cap=32) {
154 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... |
155 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0|
156 00000020 31 32 |12|
157 }
158
159Custom Formatter
160
161Spew provides a custom formatter that implements the fmt.Formatter interface
162so that it integrates cleanly with standard fmt package printing functions. The
163formatter is useful for inline printing of smaller data types similar to the
164standard %v format specifier.
165
166The custom formatter only responds to the %v (most compact), %+v (adds pointer
167addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
168combinations. Any other verbs such as %x and %q will be sent to the
169standard fmt package for formatting. In addition, the custom formatter ignores
170the width and precision arguments (however they will still work on the format
171specifiers not handled by the custom formatter).
172
173Custom Formatter Usage
174
175The simplest way to make use of the spew custom formatter is to call one of the
176convenience functions such as spew.Printf, spew.Println, or spew.Fprintf. The
177functions have syntax you are most likely already familiar with:
178
179 spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
180 spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
181 spew.Println(myVar1, myVar2)
182 spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
183 spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
184
185See the Index for the full list of convenience functions.
186
187Sample Formatter Output
188
189Double pointer to a uint8:
190 %v: <**>5
191 %+v: <**>(0xf8400420d0->0xf8400420c8)5
192 %#v: (**uint8)5
193 %#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5
194
195Pointer to circular struct with a uint8 field and a pointer to itself:
196 %v: <*>{1 <*><shown>}
197 %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)<shown>}
198 %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)<shown>}
199 %#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)<shown>}
200
201See the Printf example for details on the setup of variables being shown
202here.
203
204Errors
205
206Since it is possible for custom Stringer/error interfaces to panic, spew
207detects them and handles them internally by printing the panic information
208inline with the output. Since spew is intended to provide deep pretty printing
209capabilities on structures, it intentionally does not return any errors.
210*/
211package spew
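
To make the two output styles described above concrete, here is a hypothetical program (the `outer`/`inner` types are illustrative only) exercising both the Dump style and the custom Formatter verbs:

```go
package main

import (
	"os"

	"github.com/davecgh/go-spew/spew"
)

type inner struct{ Flag int }

type outer struct {
	Ptr *inner
	M   map[string]bool
}

func main() {
	v := outer{Ptr: &inner{Flag: 2}, M: map[string]bool{"one": true}}

	// Dump style: newlines, indentation, full types, pointer addresses.
	spew.Dump(v)
	spew.Fdump(os.Stderr, v)

	// Formatter style: compact inline output via the replaced %v verbs.
	spew.Printf("compact: %v -- with types: %#v\n", v, v)
}
```
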
diff --git a/vendor/github.com/davecgh/go-spew/spew/dump.go b/vendor/github.com/davecgh/go-spew/spew/dump.go
new file mode 100644
index 0000000..df1d582
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/dump.go
@@ -0,0 +1,509 @@
1/*
2 * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
3 *
4 * Permission to use, copy, modify, and distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17package spew
18
19import (
20 "bytes"
21 "encoding/hex"
22 "fmt"
23 "io"
24 "os"
25 "reflect"
26 "regexp"
27 "strconv"
28 "strings"
29)
30
31var (
32 // uint8Type is a reflect.Type representing a uint8. It is used to
33 // convert cgo types to uint8 slices for hexdumping.
34 uint8Type = reflect.TypeOf(uint8(0))
35
36 // cCharRE is a regular expression that matches a cgo char.
37 // It is used to detect character arrays to hexdump them.
38 cCharRE = regexp.MustCompile("^.*\\._Ctype_char$")
39
40 // cUnsignedCharRE is a regular expression that matches a cgo unsigned
41 // char. It is used to detect unsigned character arrays to hexdump
42 // them.
43 cUnsignedCharRE = regexp.MustCompile("^.*\\._Ctype_unsignedchar$")
44
45 // cUint8tCharRE is a regular expression that matches a cgo uint8_t.
46 // It is used to detect uint8_t arrays to hexdump them.
47 cUint8tCharRE = regexp.MustCompile("^.*\\._Ctype_uint8_t$")
48)
49
50// dumpState contains information about the state of a dump operation.
51type dumpState struct {
52 w io.Writer
53 depth int
54 pointers map[uintptr]int
55 ignoreNextType bool
56 ignoreNextIndent bool
57 cs *ConfigState
58}
59
60// indent performs indentation according to the depth level and cs.Indent
61// option.
62func (d *dumpState) indent() {
63 if d.ignoreNextIndent {
64 d.ignoreNextIndent = false
65 return
66 }
67 d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth))
68}
69
70// unpackValue returns values inside of non-nil interfaces when possible.
71// This is useful for data types like structs, arrays, slices, and maps which
72// can contain varying types packed inside an interface.
73func (d *dumpState) unpackValue(v reflect.Value) reflect.Value {
74 if v.Kind() == reflect.Interface && !v.IsNil() {
75 v = v.Elem()
76 }
77 return v
78}
79
80// dumpPtr handles formatting of pointers by indirecting them as necessary.
81func (d *dumpState) dumpPtr(v reflect.Value) {
82 // Remove pointers at or below the current depth from the map used to detect
83 // circular refs.
84 for k, depth := range d.pointers {
85 if depth >= d.depth {
86 delete(d.pointers, k)
87 }
88 }
89
90 // Keep list of all dereferenced pointers to show later.
91 pointerChain := make([]uintptr, 0)
92
93 // Figure out how many levels of indirection there are by dereferencing
94 // pointers and unpacking interfaces down the chain while detecting circular
95 // references.
96 nilFound := false
97 cycleFound := false
98 indirects := 0
99 ve := v
100 for ve.Kind() == reflect.Ptr {
101 if ve.IsNil() {
102 nilFound = true
103 break
104 }
105 indirects++
106 addr := ve.Pointer()
107 pointerChain = append(pointerChain, addr)
108 if pd, ok := d.pointers[addr]; ok && pd < d.depth {
109 cycleFound = true
110 indirects--
111 break
112 }
113 d.pointers[addr] = d.depth
114
115 ve = ve.Elem()
116 if ve.Kind() == reflect.Interface {
117 if ve.IsNil() {
118 nilFound = true
119 break
120 }
121 ve = ve.Elem()
122 }
123 }
124
125 // Display type information.
126 d.w.Write(openParenBytes)
127 d.w.Write(bytes.Repeat(asteriskBytes, indirects))
128 d.w.Write([]byte(ve.Type().String()))
129 d.w.Write(closeParenBytes)
130
131 // Display pointer information.
132 if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 {
133 d.w.Write(openParenBytes)
134 for i, addr := range pointerChain {
135 if i > 0 {
136 d.w.Write(pointerChainBytes)
137 }
138 printHexPtr(d.w, addr)
139 }
140 d.w.Write(closeParenBytes)
141 }
142
143 // Display dereferenced value.
144 d.w.Write(openParenBytes)
145 switch {
146 case nilFound:
147 d.w.Write(nilAngleBytes)
148
149 case cycleFound:
150 d.w.Write(circularBytes)
151
152 default:
153 d.ignoreNextType = true
154 d.dump(ve)
155 }
156 d.w.Write(closeParenBytes)
157}
158
159// dumpSlice handles formatting of arrays and slices. Byte (uint8 under
160// reflection) arrays and slices are dumped in hexdump -C fashion.
161func (d *dumpState) dumpSlice(v reflect.Value) {
162 // Determine whether this type should be hex dumped or not. Also,
163 // for types which should be hexdumped, try to use the underlying data
164 // first, then fall back to trying to convert them to a uint8 slice.
165 var buf []uint8
166 doConvert := false
167 doHexDump := false
168 numEntries := v.Len()
169 if numEntries > 0 {
170 vt := v.Index(0).Type()
171 vts := vt.String()
172 switch {
173 // C types that need to be converted.
174 case cCharRE.MatchString(vts):
175 fallthrough
176 case cUnsignedCharRE.MatchString(vts):
177 fallthrough
178 case cUint8tCharRE.MatchString(vts):
179 doConvert = true
180
181 // Try to use existing uint8 slices and fall back to converting
182 // and copying if that fails.
183 case vt.Kind() == reflect.Uint8:
184 // We need an addressable interface to convert the type
185 // to a byte slice. However, the reflect package won't
186 // give us an interface on certain things like
187 // unexported struct fields in order to enforce
188 // visibility rules. We use unsafe, when available, to
189 // bypass these restrictions since this package does not
190 // mutate the values.
191 vs := v
192 if !vs.CanInterface() || !vs.CanAddr() {
193 vs = unsafeReflectValue(vs)
194 }
195 if !UnsafeDisabled {
196 vs = vs.Slice(0, numEntries)
197
198 // Use the existing uint8 slice if it can be
199 // type asserted.
200 iface := vs.Interface()
201 if slice, ok := iface.([]uint8); ok {
202 buf = slice
203 doHexDump = true
204 break
205 }
206 }
207
208 // The underlying data needs to be converted if it can't
209 // be type asserted to a uint8 slice.
210 doConvert = true
211 }
212
213 // Copy and convert the underlying type if needed.
214 if doConvert && vt.ConvertibleTo(uint8Type) {
215 // Convert and copy each element into a uint8 byte
216 // slice.
217 buf = make([]uint8, numEntries)
218 for i := 0; i < numEntries; i++ {
219 vv := v.Index(i)
220 buf[i] = uint8(vv.Convert(uint8Type).Uint())
221 }
222 doHexDump = true
223 }
224 }
225
226 // Hexdump the entire slice as needed.
227 if doHexDump {
228 indent := strings.Repeat(d.cs.Indent, d.depth)
229 str := indent + hex.Dump(buf)
230 str = strings.Replace(str, "\n", "\n"+indent, -1)
231 str = strings.TrimRight(str, d.cs.Indent)
232 d.w.Write([]byte(str))
233 return
234 }
235
236 // Recursively call dump for each item.
237 for i := 0; i < numEntries; i++ {
238 d.dump(d.unpackValue(v.Index(i)))
239 if i < (numEntries - 1) {
240 d.w.Write(commaNewlineBytes)
241 } else {
242 d.w.Write(newlineBytes)
243 }
244 }
245}
246
247// dump is the main workhorse for dumping a value. It uses the passed reflect
248// value to figure out what kind of object we are dealing with and formats it
249// appropriately. It is a recursive function; however, circular data structures
250// are detected and handled properly.
251func (d *dumpState) dump(v reflect.Value) {
252 // Handle invalid reflect values immediately.
253 kind := v.Kind()
254 if kind == reflect.Invalid {
255 d.w.Write(invalidAngleBytes)
256 return
257 }
258
259 // Handle pointers specially.
260 if kind == reflect.Ptr {
261 d.indent()
262 d.dumpPtr(v)
263 return
264 }
265
266 // Print type information unless already handled elsewhere.
267 if !d.ignoreNextType {
268 d.indent()
269 d.w.Write(openParenBytes)
270 d.w.Write([]byte(v.Type().String()))
271 d.w.Write(closeParenBytes)
272 d.w.Write(spaceBytes)
273 }
274 d.ignoreNextType = false
275
276 // Display length and capacity if the built-in len and cap functions
277 // work with the value's kind and the len/cap itself is non-zero.
278 valueLen, valueCap := 0, 0
279 switch v.Kind() {
280 case reflect.Array, reflect.Slice, reflect.Chan:
281 valueLen, valueCap = v.Len(), v.Cap()
282 case reflect.Map, reflect.String:
283 valueLen = v.Len()
284 }
285 if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 {
286 d.w.Write(openParenBytes)
287 if valueLen != 0 {
288 d.w.Write(lenEqualsBytes)
289 printInt(d.w, int64(valueLen), 10)
290 }
291 if !d.cs.DisableCapacities && valueCap != 0 {
292 if valueLen != 0 {
293 d.w.Write(spaceBytes)
294 }
295 d.w.Write(capEqualsBytes)
296 printInt(d.w, int64(valueCap), 10)
297 }
298 d.w.Write(closeParenBytes)
299 d.w.Write(spaceBytes)
300 }
301
302 // Call Stringer/error interfaces if they exist and the handle methods flag
303 // is enabled
304 if !d.cs.DisableMethods {
305 if (kind != reflect.Invalid) && (kind != reflect.Interface) {
306 if handled := handleMethods(d.cs, d.w, v); handled {
307 return
308 }
309 }
310 }
311
312 switch kind {
313 case reflect.Invalid:
314 // Do nothing. We should never get here since invalid has already
315 // been handled above.
316
317 case reflect.Bool:
318 printBool(d.w, v.Bool())
319
320 case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
321 printInt(d.w, v.Int(), 10)
322
323 case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
324 printUint(d.w, v.Uint(), 10)
325
326 case reflect.Float32:
327 printFloat(d.w, v.Float(), 32)
328
329 case reflect.Float64:
330 printFloat(d.w, v.Float(), 64)
331
332 case reflect.Complex64:
333 printComplex(d.w, v.Complex(), 32)
334
335 case reflect.Complex128:
336 printComplex(d.w, v.Complex(), 64)
337
338 case reflect.Slice:
339 if v.IsNil() {
340 d.w.Write(nilAngleBytes)
341 break
342 }
343 fallthrough
344
345 case reflect.Array:
346 d.w.Write(openBraceNewlineBytes)
347 d.depth++
348 if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
349 d.indent()
350 d.w.Write(maxNewlineBytes)
351 } else {
352 d.dumpSlice(v)
353 }
354 d.depth--
355 d.indent()
356 d.w.Write(closeBraceBytes)
357
358 case reflect.String:
359 d.w.Write([]byte(strconv.Quote(v.String())))
360
361 case reflect.Interface:
362 // The only time we should get here is for nil interfaces due to
363 // unpackValue calls.
364 if v.IsNil() {
365 d.w.Write(nilAngleBytes)
366 }
367
368 case reflect.Ptr:
369 // Do nothing. We should never get here since pointers have already
370 // been handled above.
371
372 case reflect.Map:
373 // nil maps should be indicated as different from empty maps
374 if v.IsNil() {
375 d.w.Write(nilAngleBytes)
376 break
377 }
378
379 d.w.Write(openBraceNewlineBytes)
380 d.depth++
381 if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
382 d.indent()
383 d.w.Write(maxNewlineBytes)
384 } else {
385 numEntries := v.Len()
386 keys := v.MapKeys()
387 if d.cs.SortKeys {
388 sortValues(keys, d.cs)
389 }
390 for i, key := range keys {
391 d.dump(d.unpackValue(key))
392 d.w.Write(colonSpaceBytes)
393 d.ignoreNextIndent = true
394 d.dump(d.unpackValue(v.MapIndex(key)))
395 if i < (numEntries - 1) {
396 d.w.Write(commaNewlineBytes)
397 } else {
398 d.w.Write(newlineBytes)
399 }
400 }
401 }
402 d.depth--
403 d.indent()
404 d.w.Write(closeBraceBytes)
405
406 case reflect.Struct:
407 d.w.Write(openBraceNewlineBytes)
408 d.depth++
409 if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
410 d.indent()
411 d.w.Write(maxNewlineBytes)
412 } else {
413 vt := v.Type()
414 numFields := v.NumField()
415 for i := 0; i < numFields; i++ {
416 d.indent()
417 vtf := vt.Field(i)
418 d.w.Write([]byte(vtf.Name))
419 d.w.Write(colonSpaceBytes)
420 d.ignoreNextIndent = true
421 d.dump(d.unpackValue(v.Field(i)))
422 if i < (numFields - 1) {
423 d.w.Write(commaNewlineBytes)
424 } else {
425 d.w.Write(newlineBytes)
426 }
427 }
428 }
429 d.depth--
430 d.indent()
431 d.w.Write(closeBraceBytes)
432
433 case reflect.Uintptr:
434 printHexPtr(d.w, uintptr(v.Uint()))
435
436 case reflect.UnsafePointer, reflect.Chan, reflect.Func:
437 printHexPtr(d.w, v.Pointer())
438
439 // There were not any other types at the time this code was written, but
440 // fall back to letting the default fmt package handle it in case any new
441 // types are added.
442 default:
443 if v.CanInterface() {
444 fmt.Fprintf(d.w, "%v", v.Interface())
445 } else {
446 fmt.Fprintf(d.w, "%v", v.String())
447 }
448 }
449}
450
451// fdump is a helper function to consolidate the logic from the various public
452// methods which take varying writers and config states.
453func fdump(cs *ConfigState, w io.Writer, a ...interface{}) {
454 for _, arg := range a {
455 if arg == nil {
456 w.Write(interfaceBytes)
457 w.Write(spaceBytes)
458 w.Write(nilAngleBytes)
459 w.Write(newlineBytes)
460 continue
461 }
462
463 d := dumpState{w: w, cs: cs}
464 d.pointers = make(map[uintptr]int)
465 d.dump(reflect.ValueOf(arg))
466 d.w.Write(newlineBytes)
467 }
468}
469
470// Fdump formats and displays the passed arguments to io.Writer w. It formats
471// exactly the same as Dump.
472func Fdump(w io.Writer, a ...interface{}) {
473 fdump(&Config, w, a...)
474}
475
476// Sdump returns a string with the passed arguments formatted exactly the same
477// as Dump.
478func Sdump(a ...interface{}) string {
479 var buf bytes.Buffer
480 fdump(&Config, &buf, a...)
481 return buf.String()
482}
483
484/*
485Dump displays the passed parameters to standard out with newlines, customizable
486indentation, and additional debug information such as complete types and all
487pointer addresses used to indirect to the final value. It provides the
488following features over the built-in printing facilities provided by the fmt
489package:
490
491 * Pointers are dereferenced and followed
492 * Circular data structures are detected and handled properly
493 * Custom Stringer/error interfaces are optionally invoked, including
494 on unexported types
495 * Custom types which only implement the Stringer/error interfaces via
496 a pointer receiver are optionally invoked when passing non-pointer
497 variables
498 * Byte arrays and slices are dumped like the hexdump -C command which
499 includes offsets, byte values in hex, and ASCII output
500
501The configuration options are controlled by an exported package global,
502spew.Config. See ConfigState for options documentation.
503
504See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
505get the formatted result as a string.
506*/
507func Dump(a ...interface{}) {
508 fdump(&Config, os.Stdout, a...)
509}
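
A short sketch of the two behaviors implemented in this file: the circular-reference detection that dumpPtr performs with its pointers map, and the hexdump-style byte output from dumpSlice (the `node` type here is hypothetical):

```go
package main

import "github.com/davecgh/go-spew/spew"

// node points back to itself so Dump exercises cycle detection.
type node struct {
	Value int
	Next  *node
}

func main() {
	n := &node{Value: 1}
	n.Next = n // circular reference

	// The cycle is reported as <already shown> instead of recursing forever.
	spew.Dump(n)

	// Byte slices take the dumpSlice path and print in hexdump -C style.
	spew.Dump([]byte("spew"))
}
```
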
diff --git a/vendor/github.com/davecgh/go-spew/spew/format.go b/vendor/github.com/davecgh/go-spew/spew/format.go
new file mode 100644
index 0000000..c49875b
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/format.go
@@ -0,0 +1,419 @@
1/*
2 * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
3 *
4 * Permission to use, copy, modify, and distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17package spew
18
19import (
20 "bytes"
21 "fmt"
22 "reflect"
23 "strconv"
24 "strings"
25)
26
27// supportedFlags is a list of all the character flags supported by fmt package.
28const supportedFlags = "0-+# "
29
30// formatState implements the fmt.Formatter interface and contains information
31// about the state of a formatting operation. The NewFormatter function can
32// be used to get a new Formatter which can be used directly as arguments
33// in standard fmt package printing calls.
34type formatState struct {
35 value interface{}
36 fs fmt.State
37 depth int
38 pointers map[uintptr]int
39 ignoreNextType bool
40 cs *ConfigState
41}
42
43// buildDefaultFormat recreates the original format string without precision
44// and width information to pass in to fmt.Sprintf in the case of an
45// unrecognized type. Unless new types are added to the language, this
46// function won't ever be called.
47func (f *formatState) buildDefaultFormat() (format string) {
48 buf := bytes.NewBuffer(percentBytes)
49
50 for _, flag := range supportedFlags {
51 if f.fs.Flag(int(flag)) {
52 buf.WriteRune(flag)
53 }
54 }
55
56 buf.WriteRune('v')
57
58 format = buf.String()
59 return format
60}
61
62// constructOrigFormat recreates the original format string including precision
63// and width information to pass along to the standard fmt package. This allows
64// automatic deferral of all format strings this package doesn't support.
65func (f *formatState) constructOrigFormat(verb rune) (format string) {
66 buf := bytes.NewBuffer(percentBytes)
67
68 for _, flag := range supportedFlags {
69 if f.fs.Flag(int(flag)) {
70 buf.WriteRune(flag)
71 }
72 }
73
74 if width, ok := f.fs.Width(); ok {
75 buf.WriteString(strconv.Itoa(width))
76 }
77
78 if precision, ok := f.fs.Precision(); ok {
79 buf.Write(precisionBytes)
80 buf.WriteString(strconv.Itoa(precision))
81 }
82
83 buf.WriteRune(verb)
84
85 format = buf.String()
86 return format
87}
88
89// unpackValue returns values inside of non-nil interfaces when possible and
90// ensures that types for values which have been unpacked from an interface
91// are displayed when the show types flag is also set.
92// This is useful for data types like structs, arrays, slices, and maps which
93// can contain varying types packed inside an interface.
94func (f *formatState) unpackValue(v reflect.Value) reflect.Value {
95 if v.Kind() == reflect.Interface {
96 f.ignoreNextType = false
97 if !v.IsNil() {
98 v = v.Elem()
99 }
100 }
101 return v
102}
103
104// formatPtr handles formatting of pointers by indirecting them as necessary.
105func (f *formatState) formatPtr(v reflect.Value) {
106 // Display nil if top level pointer is nil.
107 showTypes := f.fs.Flag('#')
108 if v.IsNil() && (!showTypes || f.ignoreNextType) {
109 f.fs.Write(nilAngleBytes)
110 return
111 }
112
113 // Remove pointers at or below the current depth from the map used to detect
114 // circular refs.
115 for k, depth := range f.pointers {
116 if depth >= f.depth {
117 delete(f.pointers, k)
118 }
119 }
120
121 // Keep list of all dereferenced pointers to possibly show later.
122 pointerChain := make([]uintptr, 0)
123
124 // Figure out how many levels of indirection there are by dereferencing
125 // pointers and unpacking interfaces down the chain while detecting circular
126 // references.
127 nilFound := false
128 cycleFound := false
129 indirects := 0
130 ve := v
131 for ve.Kind() == reflect.Ptr {
132 if ve.IsNil() {
133 nilFound = true
134 break
135 }
136 indirects++
137 addr := ve.Pointer()
138 pointerChain = append(pointerChain, addr)
139 if pd, ok := f.pointers[addr]; ok && pd < f.depth {
140 cycleFound = true
141 indirects--
142 break
143 }
144 f.pointers[addr] = f.depth
145
146 ve = ve.Elem()
147 if ve.Kind() == reflect.Interface {
148 if ve.IsNil() {
149 nilFound = true
150 break
151 }
152 ve = ve.Elem()
153 }
154 }
155
156 // Display type or indirection level depending on flags.
157 if showTypes && !f.ignoreNextType {
158 f.fs.Write(openParenBytes)
159 f.fs.Write(bytes.Repeat(asteriskBytes, indirects))
160 f.fs.Write([]byte(ve.Type().String()))
161 f.fs.Write(closeParenBytes)
162 } else {
163 if nilFound || cycleFound {
164 indirects += strings.Count(ve.Type().String(), "*")
165 }
166 f.fs.Write(openAngleBytes)
167 f.fs.Write([]byte(strings.Repeat("*", indirects)))
168 f.fs.Write(closeAngleBytes)
169 }
170
171 // Display pointer information depending on flags.
172 if f.fs.Flag('+') && (len(pointerChain) > 0) {
173 f.fs.Write(openParenBytes)
174 for i, addr := range pointerChain {
175 if i > 0 {
176 f.fs.Write(pointerChainBytes)
177 }
178 printHexPtr(f.fs, addr)
179 }
180 f.fs.Write(closeParenBytes)
181 }
182
183 // Display dereferenced value.
184 switch {
185 case nilFound:
186 f.fs.Write(nilAngleBytes)
187
188 case cycleFound:
189 f.fs.Write(circularShortBytes)
190
191 default:
192 f.ignoreNextType = true
193 f.format(ve)
194 }
195}
196
197// format is the main workhorse for providing the Formatter interface. It
198// uses the passed reflect value to figure out what kind of object we are
199// dealing with and formats it appropriately. It is a recursive function;
200// however, circular data structures are detected and handled properly.
201func (f *formatState) format(v reflect.Value) {
202 // Handle invalid reflect values immediately.
203 kind := v.Kind()
204 if kind == reflect.Invalid {
205 f.fs.Write(invalidAngleBytes)
206 return
207 }
208
209 // Handle pointers specially.
210 if kind == reflect.Ptr {
211 f.formatPtr(v)
212 return
213 }
214
215 // Print type information unless already handled elsewhere.
216 if !f.ignoreNextType && f.fs.Flag('#') {
217 f.fs.Write(openParenBytes)
218 f.fs.Write([]byte(v.Type().String()))
219 f.fs.Write(closeParenBytes)
220 }
221 f.ignoreNextType = false
222
223 // Call Stringer/error interfaces if they exist and the handle methods
224 // flag is enabled.
225 if !f.cs.DisableMethods {
226 if (kind != reflect.Invalid) && (kind != reflect.Interface) {
227 if handled := handleMethods(f.cs, f.fs, v); handled {
228 return
229 }
230 }
231 }
232
233 switch kind {
234 case reflect.Invalid:
235 // Do nothing. We should never get here since invalid has already
236 // been handled above.
237
238 case reflect.Bool:
239 printBool(f.fs, v.Bool())
240
241 case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
242 printInt(f.fs, v.Int(), 10)
243
244 case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
245 printUint(f.fs, v.Uint(), 10)
246
247 case reflect.Float32:
248 printFloat(f.fs, v.Float(), 32)
249
250 case reflect.Float64:
251 printFloat(f.fs, v.Float(), 64)
252
253 case reflect.Complex64:
254 printComplex(f.fs, v.Complex(), 32)
255
256 case reflect.Complex128:
257 printComplex(f.fs, v.Complex(), 64)
258
259 case reflect.Slice:
260 if v.IsNil() {
261 f.fs.Write(nilAngleBytes)
262 break
263 }
264 fallthrough
265
266 case reflect.Array:
267 f.fs.Write(openBracketBytes)
268 f.depth++
269 if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
270 f.fs.Write(maxShortBytes)
271 } else {
272 numEntries := v.Len()
273 for i := 0; i < numEntries; i++ {
274 if i > 0 {
275 f.fs.Write(spaceBytes)
276 }
277 f.ignoreNextType = true
278 f.format(f.unpackValue(v.Index(i)))
279 }
280 }
281 f.depth--
282 f.fs.Write(closeBracketBytes)
283
284 case reflect.String:
285 f.fs.Write([]byte(v.String()))
286
287 case reflect.Interface:
288 // The only time we should get here is for nil interfaces due to
289 // unpackValue calls.
290 if v.IsNil() {
291 f.fs.Write(nilAngleBytes)
292 }
293
294 case reflect.Ptr:
295 // Do nothing. We should never get here since pointers have already
296 // been handled above.
297
298 case reflect.Map:
299 // nil maps should be indicated as different from empty maps
300 if v.IsNil() {
301 f.fs.Write(nilAngleBytes)
302 break
303 }
304
305 f.fs.Write(openMapBytes)
306 f.depth++
307 if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
308 f.fs.Write(maxShortBytes)
309 } else {
310 keys := v.MapKeys()
311 if f.cs.SortKeys {
312 sortValues(keys, f.cs)
313 }
314 for i, key := range keys {
315 if i > 0 {
316 f.fs.Write(spaceBytes)
317 }
318 f.ignoreNextType = true
319 f.format(f.unpackValue(key))
320 f.fs.Write(colonBytes)
321 f.ignoreNextType = true
322 f.format(f.unpackValue(v.MapIndex(key)))
323 }
324 }
325 f.depth--
326 f.fs.Write(closeMapBytes)
327
328 case reflect.Struct:
329 numFields := v.NumField()
330 f.fs.Write(openBraceBytes)
331 f.depth++
332 if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
333 f.fs.Write(maxShortBytes)
334 } else {
335 vt := v.Type()
336 for i := 0; i < numFields; i++ {
337 if i > 0 {
338 f.fs.Write(spaceBytes)
339 }
340 vtf := vt.Field(i)
341 if f.fs.Flag('+') || f.fs.Flag('#') {
342 f.fs.Write([]byte(vtf.Name))
343 f.fs.Write(colonBytes)
344 }
345 f.format(f.unpackValue(v.Field(i)))
346 }
347 }
348 f.depth--
349 f.fs.Write(closeBraceBytes)
350
351 case reflect.Uintptr:
352 printHexPtr(f.fs, uintptr(v.Uint()))
353
354 case reflect.UnsafePointer, reflect.Chan, reflect.Func:
355 printHexPtr(f.fs, v.Pointer())
356
357 // There were not any other types at the time this code was written, but
358 // fall back to letting the default fmt package handle it if any get added.
359 default:
360 format := f.buildDefaultFormat()
361 if v.CanInterface() {
362 fmt.Fprintf(f.fs, format, v.Interface())
363 } else {
364 fmt.Fprintf(f.fs, format, v.String())
365 }
366 }
367}
368
369// Format satisfies the fmt.Formatter interface. See NewFormatter for usage
370// details.
371func (f *formatState) Format(fs fmt.State, verb rune) {
372 f.fs = fs
373
374 // Use standard formatting for verbs that are not v.
375 if verb != 'v' {
376 format := f.constructOrigFormat(verb)
377 fmt.Fprintf(fs, format, f.value)
378 return
379 }
380
381 if f.value == nil {
382 if fs.Flag('#') {
383 fs.Write(interfaceBytes)
384 }
385 fs.Write(nilAngleBytes)
386 return
387 }
388
389 f.format(reflect.ValueOf(f.value))
390}
391
392// newFormatter is a helper function to consolidate the logic from the various
393// public methods which take varying config states.
394func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter {
395 fs := &formatState{value: v, cs: cs}
396 fs.pointers = make(map[uintptr]int)
397 return fs
398}
399
400/*
401NewFormatter returns a custom formatter that satisfies the fmt.Formatter
402interface. As a result, it integrates cleanly with standard fmt package
403printing functions. The formatter is useful for inline printing of smaller data
404types similar to the standard %v format specifier.
405
406The custom formatter only responds to the %v (most compact), %+v (adds pointer
407addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
408combinations. Any other verbs such as %x and %q will be sent to the
409standard fmt package for formatting. In addition, the custom formatter ignores
410the width and precision arguments (however they will still work on the format
411specifiers not handled by the custom formatter).
412
413Typically this function shouldn't be called directly. It is much easier to make
414use of the custom formatter by calling one of the convenience functions such as
415Printf, Println, or Fprintf.
416*/
417func NewFormatter(v interface{}) fmt.Formatter {
418 return newFormatter(&Config, v)
419}
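
As a sketch of direct use, a value returned by NewFormatter can be handed straight to the standard fmt package, which is exactly what the convenience wrappers do internally:

```go
package main

import (
	"fmt"

	"github.com/davecgh/go-spew/spew"
)

func main() {
	v := struct{ N *int }{N: new(int)}

	// %+v is intercepted by the custom formatter and shows the pointer
	// address along with the dereferenced value.
	fmt.Printf("%+v\n", spew.NewFormatter(v))

	// Unsupported verbs such as %q pass through to fmt unchanged.
	fmt.Printf("%q\n", spew.NewFormatter("hello"))
}
```
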
diff --git a/vendor/github.com/davecgh/go-spew/spew/spew.go b/vendor/github.com/davecgh/go-spew/spew/spew.go
new file mode 100644
index 0000000..32c0e33
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/spew.go
@@ -0,0 +1,148 @@
1/*
2 * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
3 *
4 * Permission to use, copy, modify, and distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17package spew
18
19import (
20 "fmt"
21 "io"
22)
23
24// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
25// passed with a default Formatter interface returned by NewFormatter. It
26// returns the formatted string as a value that satisfies error. See
27// NewFormatter for formatting details.
28//
29// This function is shorthand for the following syntax:
30//
31// fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b))
32func Errorf(format string, a ...interface{}) (err error) {
33 return fmt.Errorf(format, convertArgs(a)...)
34}
35
36// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
37// passed with a default Formatter interface returned by NewFormatter. It
38// returns the number of bytes written and any write error encountered. See
39// NewFormatter for formatting details.
40//
41// This function is shorthand for the following syntax:
42//
43// fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b))
44func Fprint(w io.Writer, a ...interface{}) (n int, err error) {
45 return fmt.Fprint(w, convertArgs(a)...)
46}
47
48// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
49// passed with a default Formatter interface returned by NewFormatter. It
50// returns the number of bytes written and any write error encountered. See
51// NewFormatter for formatting details.
52//
53// This function is shorthand for the following syntax:
54//
55// fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b))
56func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
57 return fmt.Fprintf(w, format, convertArgs(a)...)
58}
59
60// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
61// were passed with a default Formatter interface returned by NewFormatter. See
62// NewFormatter for formatting details.
63//
64// This function is shorthand for the following syntax:
65//
66// fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b))
67func Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
68 return fmt.Fprintln(w, convertArgs(a)...)
69}
70
71// Print is a wrapper for fmt.Print that treats each argument as if it were
72// passed with a default Formatter interface returned by NewFormatter. It
73// returns the number of bytes written and any write error encountered. See
74// NewFormatter for formatting details.
75//
76// This function is shorthand for the following syntax:
77//
78// fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b))
79func Print(a ...interface{}) (n int, err error) {
80 return fmt.Print(convertArgs(a)...)
81}
82
83// Printf is a wrapper for fmt.Printf that treats each argument as if it were
84// passed with a default Formatter interface returned by NewFormatter. It
85// returns the number of bytes written and any write error encountered. See
86// NewFormatter for formatting details.
87//
88// This function is shorthand for the following syntax:
89//
90// fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b))
91func Printf(format string, a ...interface{}) (n int, err error) {
92 return fmt.Printf(format, convertArgs(a)...)
93}
94
95// Println is a wrapper for fmt.Println that treats each argument as if it were
96// passed with a default Formatter interface returned by NewFormatter. It
97// returns the number of bytes written and any write error encountered. See
98// NewFormatter for formatting details.
99//
100// This function is shorthand for the following syntax:
101//
102// fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b))
103func Println(a ...interface{}) (n int, err error) {
104 return fmt.Println(convertArgs(a)...)
105}
106
107// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
108// passed with a default Formatter interface returned by NewFormatter. It
109// returns the resulting string. See NewFormatter for formatting details.
110//
111// This function is shorthand for the following syntax:
112//
113// fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b))
114func Sprint(a ...interface{}) string {
115 return fmt.Sprint(convertArgs(a)...)
116}
117
118// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
119// passed with a default Formatter interface returned by NewFormatter. It
120// returns the resulting string. See NewFormatter for formatting details.
121//
122// This function is shorthand for the following syntax:
123//
124// fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b))
125func Sprintf(format string, a ...interface{}) string {
126 return fmt.Sprintf(format, convertArgs(a)...)
127}
128
129// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
130// were passed with a default Formatter interface returned by NewFormatter. It
131// returns the resulting string. See NewFormatter for formatting details.
132//
133// This function is shorthand for the following syntax:
134//
135// fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b))
136func Sprintln(a ...interface{}) string {
137 return fmt.Sprintln(convertArgs(a)...)
138}
139
140// convertArgs accepts a slice of arguments and returns a slice of the same
141// length with each argument converted to a default spew Formatter interface.
142func convertArgs(args []interface{}) (formatters []interface{}) {
143 formatters = make([]interface{}, len(args))
144 for index, arg := range args {
145 formatters[index] = NewFormatter(arg)
146 }
147 return formatters
148}
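
A brief sketch of the top-level wrappers defined above; each argument is routed through convertArgs before reaching the corresponding fmt function:

```go
package main

import (
	"fmt"

	"github.com/davecgh/go-spew/spew"
)

func main() {
	cfg := map[string]int{"retries": 3}

	// Sprintf wraps fmt.Sprintf with spew's default Formatter.
	fmt.Println(spew.Sprintf("config: %+v", cfg))

	// Errorf builds an error whose message uses the same formatting.
	err := spew.Errorf("bad config: %#v", cfg)
	fmt.Println(err)
}
```
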
diff --git a/vendor/github.com/go-ini/ini/LICENSE b/vendor/github.com/go-ini/ini/LICENSE
new file mode 100644
index 0000000..37ec93a
--- /dev/null
+++ b/vendor/github.com/go-ini/ini/LICENSE
@@ -0,0 +1,191 @@
1Apache License
2Version 2.0, January 2004
3http://www.apache.org/licenses/
4
5TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6
71. Definitions.
8
9"License" shall mean the terms and conditions for use, reproduction, and
10distribution as defined by Sections 1 through 9 of this document.
11
12"Licensor" shall mean the copyright owner or entity authorized by the copyright
13owner that is granting the License.
14
15"Legal Entity" shall mean the union of the acting entity and all other entities
16that control, are controlled by, or are under common control with that entity.
17For the purposes of this definition, "control" means (i) the power, direct or
18indirect, to cause the direction or management of such entity, whether by
19contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
20outstanding shares, or (iii) beneficial ownership of such entity.
21
22"You" (or "Your") shall mean an individual or Legal Entity exercising
23permissions granted by this License.
24
25"Source" form shall mean the preferred form for making modifications, including
26but not limited to software source code, documentation source, and configuration
27files.
28
29"Object" form shall mean any form resulting from mechanical transformation or
30translation of a Source form, including but not limited to compiled object code,
31generated documentation, and conversions to other media types.
32
33"Work" shall mean the work of authorship, whether in Source or Object form, made
34available under the License, as indicated by a copyright notice that is included
35in or attached to the work (an example is provided in the Appendix below).
36
37"Derivative Works" shall mean any work, whether in Source or Object form, that
38is based on (or derived from) the Work and for which the editorial revisions,
39annotations, elaborations, or other modifications represent, as a whole, an
40original work of authorship. For the purposes of this License, Derivative Works
41shall not include works that remain separable from, or merely link (or bind by
42name) to the interfaces of, the Work and Derivative Works thereof.
43
44"Contribution" shall mean any work of authorship, including the original version
45of the Work and any modifications or additions to that Work or Derivative Works
46thereof, that is intentionally submitted to Licensor for inclusion in the Work
47by the copyright owner or by an individual or Legal Entity authorized to submit
48on behalf of the copyright owner. For the purposes of this definition,
49"submitted" means any form of electronic, verbal, or written communication sent
50to the Licensor or its representatives, including but not limited to
51communication on electronic mailing lists, source code control systems, and
52issue tracking systems that are managed by, or on behalf of, the Licensor for
53the purpose of discussing and improving the Work, but excluding communication
54that is conspicuously marked or otherwise designated in writing by the copyright
55owner as "Not a Contribution."
56
57"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
58of whom a Contribution has been received by Licensor and subsequently
59incorporated within the Work.
60
612. Grant of Copyright License.
62
63Subject to the terms and conditions of this License, each Contributor hereby
64grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
65irrevocable copyright license to reproduce, prepare Derivative Works of,
66publicly display, publicly perform, sublicense, and distribute the Work and such
67Derivative Works in Source or Object form.
68
693. Grant of Patent License.
70
71Subject to the terms and conditions of this License, each Contributor hereby
72grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
73irrevocable (except as stated in this section) patent license to make, have
74made, use, offer to sell, sell, import, and otherwise transfer the Work, where
75such license applies only to those patent claims licensable by such Contributor
76that are necessarily infringed by their Contribution(s) alone or by combination
77of their Contribution(s) with the Work to which such Contribution(s) was
78submitted. If You institute patent litigation against any entity (including a
79cross-claim or counterclaim in a lawsuit) alleging that the Work or a
80Contribution incorporated within the Work constitutes direct or contributory
81patent infringement, then any patent licenses granted to You under this License
82for that Work shall terminate as of the date such litigation is filed.
83
844. Redistribution.
85
86You may reproduce and distribute copies of the Work or Derivative Works thereof
87in any medium, with or without modifications, and in Source or Object form,
88provided that You meet the following conditions:
89
90You must give any other recipients of the Work or Derivative Works a copy of
91this License; and
92You must cause any modified files to carry prominent notices stating that You
93changed the files; and
94You must retain, in the Source form of any Derivative Works that You distribute,
95all copyright, patent, trademark, and attribution notices from the Source form
96of the Work, excluding those notices that do not pertain to any part of the
97Derivative Works; and
98If the Work includes a "NOTICE" text file as part of its distribution, then any
99Derivative Works that You distribute must include a readable copy of the
100attribution notices contained within such NOTICE file, excluding those notices
101that do not pertain to any part of the Derivative Works, in at least one of the
102following places: within a NOTICE text file distributed as part of the
103Derivative Works; within the Source form or documentation, if provided along
104with the Derivative Works; or, within a display generated by the Derivative
105Works, if and wherever such third-party notices normally appear. The contents of
106the NOTICE file are for informational purposes only and do not modify the
107License. You may add Your own attribution notices within Derivative Works that
108You distribute, alongside or as an addendum to the NOTICE text from the Work,
109provided that such additional attribution notices cannot be construed as
110modifying the License.
111You may add Your own copyright statement to Your modifications and may provide
112additional or different license terms and conditions for use, reproduction, or
113distribution of Your modifications, or for any such Derivative Works as a whole,
114provided Your use, reproduction, and distribution of the Work otherwise complies
115with the conditions stated in this License.
116
1175. Submission of Contributions.
118
119Unless You explicitly state otherwise, any Contribution intentionally submitted
120for inclusion in the Work by You to the Licensor shall be under the terms and
121conditions of this License, without any additional terms or conditions.
122Notwithstanding the above, nothing herein shall supersede or modify the terms of
123any separate license agreement you may have executed with Licensor regarding
124such Contributions.
125
1266. Trademarks.
127
128This License does not grant permission to use the trade names, trademarks,
129service marks, or product names of the Licensor, except as required for
130reasonable and customary use in describing the origin of the Work and
131reproducing the content of the NOTICE file.
132
1337. Disclaimer of Warranty.
134
135Unless required by applicable law or agreed to in writing, Licensor provides the
136Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
137WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
138including, without limitation, any warranties or conditions of TITLE,
139NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
140solely responsible for determining the appropriateness of using or
141redistributing the Work and assume any risks associated with Your exercise of
142permissions under this License.
143
1448. Limitation of Liability.
145
146In no event and under no legal theory, whether in tort (including negligence),
147contract, or otherwise, unless required by applicable law (such as deliberate
148and grossly negligent acts) or agreed to in writing, shall any Contributor be
149liable to You for damages, including any direct, indirect, special, incidental,
150or consequential damages of any character arising as a result of this License or
151out of the use or inability to use the Work (including but not limited to
152damages for loss of goodwill, work stoppage, computer failure or malfunction, or
153any and all other commercial damages or losses), even if such Contributor has
154been advised of the possibility of such damages.
155
1569. Accepting Warranty or Additional Liability.
157
158While redistributing the Work or Derivative Works thereof, You may choose to
159offer, and charge a fee for, acceptance of support, warranty, indemnity, or
160other liability obligations and/or rights consistent with this License. However,
161in accepting such obligations, You may act only on Your own behalf and on Your
162sole responsibility, not on behalf of any other Contributor, and only if You
163agree to indemnify, defend, and hold each Contributor harmless for any liability
164incurred by, or claims asserted against, such Contributor by reason of your
165accepting any such warranty or additional liability.
166
167END OF TERMS AND CONDITIONS
168
169APPENDIX: How to apply the Apache License to your work
170
171To apply the Apache License to your work, attach the following boilerplate
172notice, with the fields enclosed by brackets "[]" replaced with your own
173identifying information. (Don't include the brackets!) The text should be
174enclosed in the appropriate comment syntax for the file format. We also
175recommend that a file or class name and description of purpose be included on
176the same "printed page" as the copyright notice for easier identification within
177third-party archives.
178
179 Copyright [yyyy] [name of copyright owner]
180
181 Licensed under the Apache License, Version 2.0 (the "License");
182 you may not use this file except in compliance with the License.
183 You may obtain a copy of the License at
184
185 http://www.apache.org/licenses/LICENSE-2.0
186
187 Unless required by applicable law or agreed to in writing, software
188 distributed under the License is distributed on an "AS IS" BASIS,
189 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
190 See the License for the specific language governing permissions and
191 limitations under the License.
diff --git a/vendor/github.com/go-ini/ini/Makefile b/vendor/github.com/go-ini/ini/Makefile
new file mode 100644
index 0000000..ac034e5
--- /dev/null
+++ b/vendor/github.com/go-ini/ini/Makefile
@@ -0,0 +1,12 @@
1.PHONY: build test bench vet
2
3build: vet bench
4
5test:
6 go test -v -cover -race
7
8bench:
9 go test -v -cover -race -test.bench=. -test.benchmem
10
11vet:
12 go vet
diff --git a/vendor/github.com/go-ini/ini/README.md b/vendor/github.com/go-ini/ini/README.md
new file mode 100644
index 0000000..22a4234
--- /dev/null
+++ b/vendor/github.com/go-ini/ini/README.md
@@ -0,0 +1,734 @@
1INI [![Build Status](https://travis-ci.org/go-ini/ini.svg?branch=master)](https://travis-ci.org/go-ini/ini)
2===
3
4![](https://avatars0.githubusercontent.com/u/10216035?v=3&s=200)
5
6Package ini provides INI file read and write functionality in Go.
7
8[简体中文](README_ZH.md)
9
10## Features
11
12- Load multiple data sources (`[]byte`, file and `io.ReadCloser`) with overwrites.
13- Read with recursive values.
14- Read with parent-child sections.
15- Read with auto-increment key names.
16- Read with multiple-line values.
17- Read with tons of helper methods.
18- Read and convert values to Go types.
19- Read and **WRITE** comments of sections and keys.
20- Manipulate sections, keys and comments with ease.
21- Keep sections and keys in order as you parse and save.
22
23## Installation
24
25To use a tagged revision:
26
27 go get gopkg.in/ini.v1
28
29To use the latest changes:
30
31 go get github.com/go-ini/ini
32
33Please add the `-u` flag to update in the future.
34
35### Testing
36
37If you want to test on your machine, please apply the `-t` flag:
38
39 go get -t gopkg.in/ini.v1
40
41Please add the `-u` flag to update in the future.
42
43## Getting Started
44
45### Loading from data sources
46
47A **Data Source** is either raw data of type `[]byte`, a file name of type `string`, or an `io.ReadCloser`. You can load **as many data sources as you want**. Passing any other type will simply return an error.
48
49```go
50cfg, err := ini.Load([]byte("raw data"), "filename", ioutil.NopCloser(bytes.NewReader([]byte("some other data"))))
51```
52
53Or start with an empty object:
54
55```go
56cfg := ini.Empty()
57```
58
59When you cannot decide how many data sources to load at the beginning, you can still **Append()** them later.
60
61```go
62err := cfg.Append("other file", []byte("other raw data"))
63```
64
65If you have a list of files, some of which may not be available at the time, and you don't know exactly which ones, you can use `LooseLoad` to ignore nonexistent files without returning an error.
66
67```go
68cfg, err := ini.LooseLoad("filename", "filename_404")
69```
70
71The cool thing is, whenever a file becomes available while you're calling the `Reload` method, it will be loaded as usual.
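
For example, a minimal sketch (assuming `filename_404` is created on disk at some later point):

```go
cfg, err := ini.LooseLoad("filename", "filename_404")
// filename_404 is skipped for now; once it exists on disk:
err = cfg.Reload() // this time both files are parsed
```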
72
73#### Ignore cases of key name
74
75When you do not care about cases of section and key names, you can use `InsensitiveLoad` to force all names to be lowercased while parsing.
76
77```go
78cfg, err := ini.InsensitiveLoad("filename")
79//...
80
81// sec1 and sec2 are the exact same section object
82sec1, err := cfg.GetSection("Section")
83sec2, err := cfg.GetSection("SecTIOn")
84
85// key1 and key2 are the exact same key object
86key1, err := cfg.GetKey("Key")
87key2, err := cfg.GetKey("KeY")
88```
89
90#### MySQL-like boolean key
91
92MySQL's configuration allows a key without a value, as follows:
93
94```ini
95[mysqld]
96...
97skip-host-cache
98skip-name-resolve
99```
100
101By default, this is considered a missing value. But if you know you're going to deal with such cases, you can assign advanced load options:
102
103```go
104cfg, err := ini.LoadSources(ini.LoadOptions{AllowBooleanKeys: true}, "my.cnf")
105```
106
107The value of those keys is always `true`, and when you save to a file, it will be kept in the same format as you read it.
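
For example, with the options above applied, reading one of those keys (a minimal sketch):

```go
on := cfg.Section("mysqld").Key("skip-host-cache").MustBool(false) // true
```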
108
109#### Comment
110
111Take care that the following formats will be treated as comments:
112
1131. Lines that begin with `#` or `;`
1142. Words after `#` or `;`
1153. Words after a section name (i.e. words after `[some section name]`)
116
117If you want to save a value containing `#` or `;`, please quote it with ``` ` ``` or ``` """ ```.
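
For example (the key names here are made up):

```ini
; both values keep their comment characters intact
skin = `gray; not blue`
motto = """live # free"""
```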
118
119### Working with sections
120
121To get a section, you would need to:
122
123```go
124section, err := cfg.GetSection("section name")
125```
126
127As a shortcut for the default section, just give an empty string as the name:
128
129```go
130section, err := cfg.GetSection("")
131```
132
133When you're pretty sure the section exists, the following code could make your life easier:
134
135```go
136section := cfg.Section("section name")
137```
138
139What happens when the section somehow does not exist? Don't panic, it automatically creates and returns a new section to you.
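
A tiny sketch of that behavior (the section name is made up):

```go
sec := cfg.Section("not-created-yet") // silently created on first access
fmt.Println(sec.Name())               // not-created-yet
```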
140
141To create a new section:
142
143```go
144_, err := cfg.NewSection("new section")
145```
146
147To get a list of sections or section names:
148
149```go
150sections := cfg.Sections()
151names := cfg.SectionStrings()
152```
153
154### Working with keys
155
156To get a key under a section:
157
158```go
159key, err := cfg.Section("").GetKey("key name")
160```
161
162The same rule applies to key operations:
163
164```go
165key := cfg.Section("").Key("key name")
166```
167
168To check if a key exists:
169
170```go
171yes := cfg.Section("").HasKey("key name")
172```
173
174To create a new key:
175
176```go
177err := cfg.Section("").NewKey("name", "value")
178```
179
180To get a list of keys or key names:
181
182```go
183keys := cfg.Section("").Keys()
184names := cfg.Section("").KeyStrings()
185```
186
187To get a clone hash of keys and corresponding values:
188
189```go
190hash := cfg.Section("").KeysHash()
191```
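
Since the hash is a clone, you can iterate or modify it freely without touching the section; for example:

```go
for name, value := range hash {
	fmt.Printf("%s => %s\n", name, value)
}
```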
192
193### Working with values
194
195To get a string value:
196
197```go
198val := cfg.Section("").Key("key name").String()
199```
200
201To validate the key value on the fly:
202
203```go
204val := cfg.Section("").Key("key name").Validate(func(in string) string {
205 if len(in) == 0 {
206 return "default"
207 }
208 return in
209})
210```
211
212If you do not want any auto-transformation (such as recursive reads) of the values, you can get the raw value directly (this way you get much better performance):
213
214```go
215val := cfg.Section("").Key("key name").Value()
216```
217
218To check if raw value exists:
219
220```go
221yes := cfg.Section("").HasValue("test value")
222```
223
224To get values of other types:
225
226```go
227// For boolean values:
228// true when value is: 1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On
229// false when value is: 0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off
230v, err = cfg.Section("").Key("BOOL").Bool()
231v, err = cfg.Section("").Key("FLOAT64").Float64()
232v, err = cfg.Section("").Key("INT").Int()
233v, err = cfg.Section("").Key("INT64").Int64()
234v, err = cfg.Section("").Key("UINT").Uint()
235v, err = cfg.Section("").Key("UINT64").Uint64()
236v, err = cfg.Section("").Key("TIME").TimeFormat(time.RFC3339)
237v, err = cfg.Section("").Key("TIME").Time() // RFC3339
238
239v = cfg.Section("").Key("BOOL").MustBool()
240v = cfg.Section("").Key("FLOAT64").MustFloat64()
241v = cfg.Section("").Key("INT").MustInt()
242v = cfg.Section("").Key("INT64").MustInt64()
243v = cfg.Section("").Key("UINT").MustUint()
244v = cfg.Section("").Key("UINT64").MustUint64()
245v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339)
246v = cfg.Section("").Key("TIME").MustTime() // RFC3339
247
248// Methods starting with Must also accept one argument as the default value,
249// used when the key is not found or the value fails to parse to the given type.
250// The exception is MustString, which always requires a default value.
251
252v = cfg.Section("").Key("String").MustString("default")
253v = cfg.Section("").Key("BOOL").MustBool(true)
254v = cfg.Section("").Key("FLOAT64").MustFloat64(1.25)
255v = cfg.Section("").Key("INT").MustInt(10)
256v = cfg.Section("").Key("INT64").MustInt64(99)
257v = cfg.Section("").Key("UINT").MustUint(3)
258v = cfg.Section("").Key("UINT64").MustUint64(6)
259v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339, time.Now())
260v = cfg.Section("").Key("TIME").MustTime(time.Now()) // RFC3339
261```
262
263What if my value is three lines long?
264
265```ini
266[advance]
267ADDRESS = """404 road,
268NotFound, State, 5000
269Earth"""
270```
271
272Not a problem!
273
274```go
275cfg.Section("advance").Key("ADDRESS").String()
276
277/* --- start ---
278404 road,
279NotFound, State, 5000
280Earth
281------ end --- */
282```
283
284That's cool, how about continuation lines?
285
286```ini
287[advance]
288two_lines = how about \
289 continuation lines?
290lots_of_lines = 1 \
291 2 \
292 3 \
293 4
294```
295
296Piece of cake!
297
298```go
299cfg.Section("advance").Key("two_lines").String() // how about continuation lines?
300cfg.Section("advance").Key("lots_of_lines").String() // 1 2 3 4
301```
302
303Well, I hate continuation lines, how do I disable that?
304
305```go
306cfg, err := ini.LoadSources(ini.LoadOptions{
307 IgnoreContinuation: true,
308}, "filename")
309```
310
311Holy crap!
312
313Note that quotes (single or double) around values will be stripped:
314
315```ini
316foo = "some value" // foo: some value
317bar = 'some value' // bar: some value
318```
319
320That's all? Hmm, no.
321
322#### Helper methods of working with values
323
324To get a value from given candidates:
325
326```go
327v = cfg.Section("").Key("STRING").In("default", []string{"str", "arr", "types"})
328v = cfg.Section("").Key("FLOAT64").InFloat64(1.1, []float64{1.25, 2.5, 3.75})
329v = cfg.Section("").Key("INT").InInt(5, []int{10, 20, 30})
330v = cfg.Section("").Key("INT64").InInt64(10, []int64{10, 20, 30})
331v = cfg.Section("").Key("UINT").InUint(4, []int{3, 6, 9})
332v = cfg.Section("").Key("UINT64").InUint64(8, []int64{3, 6, 9})
333v = cfg.Section("").Key("TIME").InTimeFormat(time.RFC3339, time.Now(), []time.Time{time1, time2, time3})
334v = cfg.Section("").Key("TIME").InTime(time.Now(), []time.Time{time1, time2, time3}) // RFC3339
335```
336
337The default value will be returned if the value of the key is not among the candidates you gave, and the default value does not need to be one of the candidates.
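
For instance, assuming the key `STRING` holds the value `unknown`:

```go
// "unknown" is not a candidate, so the default is returned
v = cfg.Section("").Key("STRING").In("default", []string{"str", "arr", "types"})
// v == "default"
```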
338
339To validate that a value is in a given range:
340
341```go
342vals = cfg.Section("").Key("FLOAT64").RangeFloat64(0.0, 1.1, 2.2)
343vals = cfg.Section("").Key("INT").RangeInt(0, 10, 20)
344vals = cfg.Section("").Key("INT64").RangeInt64(0, 10, 20)
345vals = cfg.Section("").Key("UINT").RangeUint(0, 3, 9)
346vals = cfg.Section("").Key("UINT64").RangeUint64(0, 3, 9)
347vals = cfg.Section("").Key("TIME").RangeTimeFormat(time.RFC3339, time.Now(), minTime, maxTime)
348vals = cfg.Section("").Key("TIME").RangeTime(time.Now(), minTime, maxTime) // RFC3339
349```
350
351##### Auto-split values into a slice
352
353To use the type's zero value for invalid inputs:
354
355```go
356// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4]
357// Input: how, 2.2, are, you -> [0.0 2.2 0.0 0.0]
358vals = cfg.Section("").Key("STRINGS").Strings(",")
359vals = cfg.Section("").Key("FLOAT64S").Float64s(",")
360vals = cfg.Section("").Key("INTS").Ints(",")
361vals = cfg.Section("").Key("INT64S").Int64s(",")
362vals = cfg.Section("").Key("UINTS").Uints(",")
363vals = cfg.Section("").Key("UINT64S").Uint64s(",")
364vals = cfg.Section("").Key("TIMES").Times(",")
365```
366
367To exclude invalid values from the result slice:
368
369```go
370// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4]
371// Input: how, 2.2, are, you -> [2.2]
372vals = cfg.Section("").Key("FLOAT64S").ValidFloat64s(",")
373vals = cfg.Section("").Key("INTS").ValidInts(",")
374vals = cfg.Section("").Key("INT64S").ValidInt64s(",")
375vals = cfg.Section("").Key("UINTS").ValidUints(",")
376vals = cfg.Section("").Key("UINT64S").ValidUint64s(",")
377vals = cfg.Section("").Key("TIMES").ValidTimes(",")
378```
379
380Or to return nothing but an error when there are invalid inputs:
381
382```go
383// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4]
384// Input: how, 2.2, are, you -> error
385vals = cfg.Section("").Key("FLOAT64S").StrictFloat64s(",")
386vals = cfg.Section("").Key("INTS").StrictInts(",")
387vals = cfg.Section("").Key("INT64S").StrictInt64s(",")
388vals = cfg.Section("").Key("UINTS").StrictUints(",")
389vals = cfg.Section("").Key("UINT64S").StrictUint64s(",")
390vals = cfg.Section("").Key("TIMES").StrictTimes(",")
391```
392
393### Save your configuration
394
395Finally, it's time to save your configuration somewhere.
396
397A typical way to save a configuration is writing it to a file:
398
399```go
400// ...
401err = cfg.SaveTo("my.ini")
402err = cfg.SaveToIndent("my.ini", "\t")
403```
404
405Another way to save is writing to an `io.Writer`:
406
407```go
408// ...
409cfg.WriteTo(writer)
410cfg.WriteToIndent(writer, "\t")
411```
412
413By default, spaces are used to align the "=" sign between keys and values; to disable that:
414
415```go
416ini.PrettyFormat = false
417```
418
419## Advanced Usage
420
421### Recursive Values
422
423For all key values, there is a special syntax `%(<name>)s`, where `<name>` is a key name in the same section or the default section; `%(<name>)s` will be replaced by the corresponding value (or an empty string if the key is not found). You can nest this syntax up to 99 levels deep.
424
425```ini
426NAME = ini
427
428[author]
429NAME = Unknwon
430GITHUB = https://github.com/%(NAME)s
431
432[package]
433FULL_NAME = github.com/go-ini/%(NAME)s
434```
435
436```go
437cfg.Section("author").Key("GITHUB").String() // https://github.com/Unknwon
438cfg.Section("package").Key("FULL_NAME").String() // github.com/go-ini/ini
439```
440
441### Parent-child Sections
442
443You can use `.` in a section name to indicate a parent-child relationship between two or more sections. If a key is not found in the child section, the library will try its parent section, and so on, until there is no parent section.
444
445```ini
446NAME = ini
447VERSION = v1
448IMPORT_PATH = gopkg.in/%(NAME)s.%(VERSION)s
449
450[package]
451CLONE_URL = https://%(IMPORT_PATH)s
452
453[package.sub]
454```
455
456```go
457cfg.Section("package.sub").Key("CLONE_URL").String() // https://gopkg.in/ini.v1
458```
459
460#### Retrieve parent keys available to a child section
461
462```go
463cfg.Section("package.sub").ParentKeys() // ["CLONE_URL"]
464```
465
466### Unparseable Sections
467
468Sometimes, you have sections that do not contain key-value pairs but raw content; to handle such cases, you can use `LoadOptions.UnparseableSections`:
469
470```go
471cfg, err := ini.LoadSources(ini.LoadOptions{UnparseableSections: []string{"COMMENTS"}}, `[COMMENTS]
472<1><L.Slide#2> This slide has the fuel listed in the wrong units <e.1>`)
473
474body := cfg.Section("COMMENTS").Body()
475
476/* --- start ---
477<1><L.Slide#2> This slide has the fuel listed in the wrong units <e.1>
478------ end --- */
479```
480
481### Auto-increment Key Names
482
483If a key name is `-` in the data source, it is treated as special syntax for an auto-increment key name starting from 1, and every section has its own independent counter.
484
485```ini
486[features]
487-: Support read/write comments of keys and sections
488-: Support auto-increment of key names
489-: Support load multiple files to overwrite key values
490```
491
492```go
493cfg.Section("features").KeyStrings() // []{"#1", "#2", "#3"}
494```
495
496### Map To Struct
497
498Want a more object-oriented way to play with INI? Cool.
499
500```ini
501Name = Unknwon
502age = 21
503Male = true
504Born = 1993-01-01T20:17:05Z
505
506[Note]
507Content = Hi is a good man!
508Cities = HangZhou, Boston
509```
510
511```go
512type Note struct {
513 Content string
514 Cities []string
515}
516
517type Person struct {
518 Name string
519 Age int `ini:"age"`
520 Male bool
521 Born time.Time
522 Note
523 Created time.Time `ini:"-"`
524}
525
526func main() {
527 cfg, err := ini.Load("path/to/ini")
528 // ...
529 p := new(Person)
530 err = cfg.MapTo(p)
531 // ...
532
533 // Things can be simpler.
534 err = ini.MapTo(p, "path/to/ini")
535 // ...
536
537 // Just map a section? Fine.
538 n := new(Note)
539 err = cfg.Section("Note").MapTo(n)
540 // ...
541}
542```
543
544Can I have a default value for a field? Absolutely.
545
546Assign it before you map to the struct. The value is kept as-is if the key is not present or has the wrong type.
547
548```go
549// ...
550p := &Person{
551 Name: "Joe",
552}
553// ...
554```
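
A small sketch of the effect (the raw data is made up; it has an `age` key but no `Name` key):

```go
p := &Person{Name: "Joe"}
_ = ini.MapTo(p, []byte("age = 30"))
// p.Name is still "Joe"; p.Age is now 30
```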
555
556It's really cool, but what's the point if you can't give me my file back from the struct?
557
558### Reflect From Struct
559
560Why not?
561
562```go
563type Embeded struct {
564 Dates []time.Time `delim:"|"`
565 Places []string `ini:"places,omitempty"`
566 None []int `ini:",omitempty"`
567}
568
569type Author struct {
570 Name string `ini:"NAME"`
571 Male bool
572 Age int
573 GPA float64
574 NeverMind string `ini:"-"`
575 *Embeded
576}
577
578func main() {
579 a := &Author{"Unknwon", true, 21, 2.8, "",
580 &Embeded{
581 []time.Time{time.Now(), time.Now()},
582 []string{"HangZhou", "Boston"},
583 []int{},
584 }}
585 cfg := ini.Empty()
586 err = ini.ReflectFrom(cfg, a)
587 // ...
588}
589```
590
591So, what do I get?
592
593```ini
594NAME = Unknwon
595Male = true
596Age = 21
597GPA = 2.8
598
599[Embeded]
600Dates = 2015-08-07T22:14:22+08:00|2015-08-07T22:14:22+08:00
601places = HangZhou,Boston
602```
603
604#### Name Mapper
605
606To save your time and make your code cleaner, this library supports a [`NameMapper`](https://gowalker.org/gopkg.in/ini.v1#NameMapper) between struct field names and actual section and key names.
607
608There are 2 built-in name mappers:
609
610- `AllCapsUnderscore`: converts names to the `ALL_CAPS_UNDERSCORE` format before matching sections or keys.
611- `TitleUnderscore`: converts names to the `title_underscore` format before matching sections or keys.
612
613To use them:
614
615```go
616type Info struct {
617 PackageName string
618}
619
620func main() {
621 err = ini.MapToWithMapper(&Info{}, ini.TitleUnderscore, []byte("package_name=ini"))
622 // ...
623
624 cfg, err := ini.Load([]byte("PACKAGE_NAME=ini"))
625 // ...
626 info := new(Info)
627 cfg.NameMapper = ini.AllCapsUnderscore
628 err = cfg.MapTo(info)
629 // ...
630}
631```
632
633The same name mapper rules apply to the `ini.ReflectFromWithMapper` function.
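
For example, a minimal sketch reusing the `Info` struct above (assuming `ReflectFromWithMapper` mirrors the `MapToWithMapper` signature, with the mapper as the last argument):

```go
cfg := ini.Empty()
err = ini.ReflectFromWithMapper(cfg, &Info{PackageName: "ini"}, ini.TitleUnderscore)
// writes: package_name = ini
```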
634
635#### Value Mapper
636
637To expand values (e.g. from environment variables), you can use the `ValueMapper` to transform values:
638
639```go
640type Env struct {
641 Foo string `ini:"foo"`
642}
643
644func main() {
645	cfg, err := ini.Load([]byte("[env]\nfoo = ${MY_VAR}\n"))
646 cfg.ValueMapper = os.ExpandEnv
647 // ...
648 env := &Env{}
649 err = cfg.Section("env").MapTo(env)
650}
651```
652
653This would set the value of `env.Foo` to the value of the environment variable `MY_VAR`.
654
655#### Other Notes On Map/Reflect
656
657Any embedded struct is treated as a section by default, and there are no automatic parent-child relations in the map/reflect feature:
658
659```go
660type Child struct {
661 Age string
662}
663
664type Parent struct {
665 Name string
666 Child
667}
668
669type Config struct {
670 City string
671 Parent
672}
673```
674
675Example configuration:
676
677```ini
678City = Boston
679
680[Parent]
681Name = Unknwon
682
683[Child]
684Age = 21
685```
686
687What if, yes, I'm paranoid, and I want the embedded struct to be in the same section? Well, all roads lead to Rome.
688
689```go
690type Child struct {
691 Age string
692}
693
694type Parent struct {
695 Name string
696 Child `ini:"Parent"`
697}
698
699type Config struct {
700 City string
701 Parent
702}
703```
704
705Example configuration:
706
707```ini
708City = Boston
709
710[Parent]
711Name = Unknwon
712Age = 21
713```
714
715## Getting Help
716
717- [API Documentation](https://gowalker.org/gopkg.in/ini.v1)
718- [File An Issue](https://github.com/go-ini/ini/issues/new)
719
720## FAQs
721
722### What does `BlockMode` field do?
723
724By default, the library lets you read and write values, so a lock is needed to keep your data safe. But if you are very sure you will only read data through the library, you can set `cfg.BlockMode = false` to make read operations about **50-70%** faster.
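
A minimal sketch, assuming nothing writes to `cfg` after this point (the file name is made up):

```go
cfg, err := ini.Load("app.ini")
// ...
cfg.BlockMode = false // read-only from here on; skips locking
val := cfg.Section("").Key("key name").String()
```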
725
726### Why another INI library?
727
728Many people are using my other INI library [goconfig](https://github.com/Unknwon/goconfig); the reason for this one is that I wanted to write more Go-style code. Also, when you set `cfg.BlockMode = false`, this one is about **10-30%** faster.
729
730Making those changes meant breaking the API, so it was safer to keep this library in another place and start using `gopkg.in` to version the package. (PS: the import path is shorter, too.)
731
732## License
733
734This project is under Apache v2 License. See the [LICENSE](LICENSE) file for the full license text.
diff --git a/vendor/github.com/go-ini/ini/README_ZH.md b/vendor/github.com/go-ini/ini/README_ZH.md
new file mode 100644
index 0000000..3b4fb66
--- /dev/null
+++ b/vendor/github.com/go-ini/ini/README_ZH.md
@@ -0,0 +1,721 @@
1本包提供了 Go 语言中读写 INI 文件的功能。
2
3## 功能特性
4
5- 支持覆盖加载多个数据源(`[]byte`、文件和 `io.ReadCloser`)
6- 支持递归读取键值
7- 支持读取父子分区
8- 支持读取自增键名
9- 支持读取多行的键值
10- 支持大量辅助方法
11- 支持在读取时直接转换为 Go 语言类型
12- 支持读取和 **写入** 分区和键的注释
13- 轻松操作分区、键值和注释
14- 在保存文件时分区和键值会保持原有的顺序
15
16## 下载安装
17
18使用一个特定版本:
19
20 go get gopkg.in/ini.v1
21
22使用最新版:
23
24 go get github.com/go-ini/ini
25
26如需更新请添加 `-u` 选项。
27
28### 测试安装
29
30如果您想要在自己的机器上运行测试,请使用 `-t` 标记:
31
32 go get -t gopkg.in/ini.v1
33
34如需更新请添加 `-u` 选项。
35
36## 开始使用
37
38### 从数据源加载
39
40一个 **数据源** 可以是 `[]byte` 类型的原始数据,`string` 类型的文件路径或 `io.ReadCloser`。您可以加载 **任意多个** 数据源。如果您传递其它类型的数据源,则会直接返回错误。
41
42```go
43cfg, err := ini.Load([]byte("raw data"), "filename", ioutil.NopCloser(bytes.NewReader([]byte("some other data"))))
44```
45
46或者从一个空白的文件开始:
47
48```go
49cfg := ini.Empty()
50```
51
52当您在一开始无法决定需要加载哪些数据源时,仍可以使用 **Append()** 在需要的时候加载它们。
53
54```go
55err := cfg.Append("other file", []byte("other raw data"))
56```
57
58当您想要加载一系列文件,但是不能够确定其中哪些文件是不存在的,可以通过调用函数 `LooseLoad` 来忽略它们(`Load` 会因为文件不存在而返回错误):
59
60```go
61cfg, err := ini.LooseLoad("filename", "filename_404")
62```
63
64更牛逼的是,当那些之前不存在的文件在重新调用 `Reload` 方法的时候突然出现了,那么它们会被正常加载。
65
66#### 忽略键名的大小写
67
68有时候分区和键的名称大小写混合非常烦人,这个时候就可以通过 `InsensitiveLoad` 将所有分区和键名在读取里强制转换为小写:
69
70```go
71cfg, err := ini.InsensitiveLoad("filename")
72//...
73
74// sec1 和 sec2 指向同一个分区对象
75sec1, err := cfg.GetSection("Section")
76sec2, err := cfg.GetSection("SecTIOn")
77
78// key1 和 key2 指向同一个键对象
79key1, err := cfg.GetKey("Key")
80key2, err := cfg.GetKey("KeY")
81```
82
83#### 类似 MySQL 配置中的布尔值键
84
85MySQL 的配置文件中会出现没有具体值的布尔类型的键:
86
87```ini
88[mysqld]
89...
90skip-host-cache
91skip-name-resolve
92```
93
94默认情况下这被认为是缺失值而无法完成解析,但可以通过高级的加载选项对它们进行处理:
95
96```go
97cfg, err := ini.LoadSources(ini.LoadOptions{AllowBooleanKeys: true}, "my.cnf")
98```
99
100这些键的值永远为 `true`,且在保存到文件时也只会输出键名。
101
102#### 关于注释
103
104下述几种情况的内容将被视为注释:
105
1061. 所有以 `#` 或 `;` 开头的行
1072. 所有在 `#` 或 `;` 之后的内容
1083. 分区标签后的文字 (即 `[分区名]` 之后的内容)
109
110如果你希望使用包含 `#` 或 `;` 的值,请使用 ``` ` ``` 或 ``` """ ``` 进行包覆。
111
112### 操作分区(Section)
113
114获取指定分区:
115
116```go
117section, err := cfg.GetSection("section name")
118```
119
120如果您想要获取默认分区,则可以用空字符串代替分区名:
121
122```go
123section, err := cfg.GetSection("")
124```
125
126当您非常确定某个分区是存在的,可以使用以下简便方法:
127
128```go
129section := cfg.Section("section name")
130```
131
132如果不小心判断错了,要获取的分区其实是不存在的,那会发生什么呢?没事的,它会自动创建并返回一个对应的分区对象给您。
133
134创建一个分区:
135
136```go
137_, err := cfg.NewSection("new section")
138```
139
140获取所有分区对象或名称:
141
142```go
143sections := cfg.Sections()
144names := cfg.SectionStrings()
145```
146
147### 操作键(Key)
148
149获取某个分区下的键:
150
151```go
152key, err := cfg.Section("").GetKey("key name")
153```
154
155和分区一样,您也可以直接获取键而忽略错误处理:
156
157```go
158key := cfg.Section("").Key("key name")
159```
160
161判断某个键是否存在:
162
163```go
164yes := cfg.Section("").HasKey("key name")
165```
166
167创建一个新的键:
168
169```go
170err := cfg.Section("").NewKey("name", "value")
171```
172
173获取分区下的所有键或键名:
174
175```go
176keys := cfg.Section("").Keys()
177names := cfg.Section("").KeyStrings()
178```
179
180获取分区下的所有键值对的克隆:
181
182```go
183hash := cfg.Section("").KeysHash()
184```
185
186### 操作键值(Value)
187
188获取一个类型为字符串(string)的值:
189
190```go
191val := cfg.Section("").Key("key name").String()
192```
193
194获取值的同时通过自定义函数进行处理验证:
195
196```go
197val := cfg.Section("").Key("key name").Validate(func(in string) string {
198 if len(in) == 0 {
199 return "default"
200 }
201 return in
202})
203```
204
205如果您不需要任何对值的自动转变功能(例如递归读取),可以直接获取原值(这种方式性能最佳):
206
207```go
208val := cfg.Section("").Key("key name").Value()
209```
210
211判断某个原值是否存在:
212
213```go
214yes := cfg.Section("").HasValue("test value")
215```
216
217获取其它类型的值:
218
219```go
220// 布尔值的规则:
221// true 当值为:1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On
222// false 当值为:0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off
223v, err = cfg.Section("").Key("BOOL").Bool()
224v, err = cfg.Section("").Key("FLOAT64").Float64()
225v, err = cfg.Section("").Key("INT").Int()
226v, err = cfg.Section("").Key("INT64").Int64()
227v, err = cfg.Section("").Key("UINT").Uint()
228v, err = cfg.Section("").Key("UINT64").Uint64()
229v, err = cfg.Section("").Key("TIME").TimeFormat(time.RFC3339)
230v, err = cfg.Section("").Key("TIME").Time() // RFC3339
231
232v = cfg.Section("").Key("BOOL").MustBool()
233v = cfg.Section("").Key("FLOAT64").MustFloat64()
234v = cfg.Section("").Key("INT").MustInt()
235v = cfg.Section("").Key("INT64").MustInt64()
236v = cfg.Section("").Key("UINT").MustUint()
237v = cfg.Section("").Key("UINT64").MustUint64()
238v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339)
239v = cfg.Section("").Key("TIME").MustTime() // RFC3339
240
241// 由 Must 开头的方法名允许接收一个相同类型的参数来作为默认值,
242// 当键不存在或者转换失败时,则会直接返回该默认值。
243// 但是,MustString 方法必须传递一个默认值。
244
245v = cfg.Section("").Key("String").MustString("default")
246v = cfg.Section("").Key("BOOL").MustBool(true)
247v = cfg.Section("").Key("FLOAT64").MustFloat64(1.25)
248v = cfg.Section("").Key("INT").MustInt(10)
249v = cfg.Section("").Key("INT64").MustInt64(99)
250v = cfg.Section("").Key("UINT").MustUint(3)
251v = cfg.Section("").Key("UINT64").MustUint64(6)
252v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339, time.Now())
253v = cfg.Section("").Key("TIME").MustTime(time.Now()) // RFC3339
254```
255
256如果我的值有好多行怎么办?
257
258```ini
259[advance]
260ADDRESS = """404 road,
261NotFound, State, 5000
262Earth"""
263```
264
265嗯哼?小 case!
266
267```go
268cfg.Section("advance").Key("ADDRESS").String()
269
270/* --- start ---
271404 road,
272NotFound, State, 5000
273Earth
274------ end --- */
275```
276
277赞爆了!那要是我属于一行的内容写不下想要写到第二行怎么办?
278
279```ini
280[advance]
281two_lines = how about \
282 continuation lines?
283lots_of_lines = 1 \
284 2 \
285 3 \
286 4
287```
288
289简直是小菜一碟!
290
291```go
292cfg.Section("advance").Key("two_lines").String() // how about continuation lines?
293cfg.Section("advance").Key("lots_of_lines").String() // 1 2 3 4
294```
295
296可是我有时候觉得两行连在一起特别没劲,怎么才能不自动连接两行呢?
297
298```go
299cfg, err := ini.LoadSources(ini.LoadOptions{
300 IgnoreContinuation: true,
301}, "filename")
302```
303
304哇靠给力啊!
305
306需要注意的是,值两侧的单引号会被自动剔除:
307
308```ini
309foo = "some value" // foo: some value
310bar = 'some value' // bar: some value
311```
312
313这就是全部了?哈哈,当然不是。
314
315#### 操作键值的辅助方法
316
317获取键值时设定候选值:
318
319```go
320v = cfg.Section("").Key("STRING").In("default", []string{"str", "arr", "types"})
321v = cfg.Section("").Key("FLOAT64").InFloat64(1.1, []float64{1.25, 2.5, 3.75})
322v = cfg.Section("").Key("INT").InInt(5, []int{10, 20, 30})
323v = cfg.Section("").Key("INT64").InInt64(10, []int64{10, 20, 30})
324v = cfg.Section("").Key("UINT").InUint(4, []int{3, 6, 9})
325v = cfg.Section("").Key("UINT64").InUint64(8, []int64{3, 6, 9})
326v = cfg.Section("").Key("TIME").InTimeFormat(time.RFC3339, time.Now(), []time.Time{time1, time2, time3})
327v = cfg.Section("").Key("TIME").InTime(time.Now(), []time.Time{time1, time2, time3}) // RFC3339
328```
329
330如果获取到的值不是候选值的任意一个,则会返回默认值,而默认值不需要是候选值中的一员。
331
332验证获取的值是否在指定范围内:
333
334```go
335vals = cfg.Section("").Key("FLOAT64").RangeFloat64(0.0, 1.1, 2.2)
336vals = cfg.Section("").Key("INT").RangeInt(0, 10, 20)
337vals = cfg.Section("").Key("INT64").RangeInt64(0, 10, 20)
338vals = cfg.Section("").Key("UINT").RangeUint(0, 3, 9)
339vals = cfg.Section("").Key("UINT64").RangeUint64(0, 3, 9)
340vals = cfg.Section("").Key("TIME").RangeTimeFormat(time.RFC3339, time.Now(), minTime, maxTime)
341vals = cfg.Section("").Key("TIME").RangeTime(time.Now(), minTime, maxTime) // RFC3339
342```
343
344##### 自动分割键值到切片(slice)
345
346当存在无效输入时,使用零值代替:
347
348```go
349// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4]
350// Input: how, 2.2, are, you -> [0.0 2.2 0.0 0.0]
351vals = cfg.Section("").Key("STRINGS").Strings(",")
352vals = cfg.Section("").Key("FLOAT64S").Float64s(",")
353vals = cfg.Section("").Key("INTS").Ints(",")
354vals = cfg.Section("").Key("INT64S").Int64s(",")
355vals = cfg.Section("").Key("UINTS").Uints(",")
356vals = cfg.Section("").Key("UINT64S").Uint64s(",")
357vals = cfg.Section("").Key("TIMES").Times(",")
358```
359
360从结果切片中剔除无效输入:
361
362```go
363// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4]
364// Input: how, 2.2, are, you -> [2.2]
365vals = cfg.Section("").Key("FLOAT64S").ValidFloat64s(",")
366vals = cfg.Section("").Key("INTS").ValidInts(",")
367vals = cfg.Section("").Key("INT64S").ValidInt64s(",")
368vals = cfg.Section("").Key("UINTS").ValidUints(",")
369vals = cfg.Section("").Key("UINT64S").ValidUint64s(",")
370vals = cfg.Section("").Key("TIMES").ValidTimes(",")
371```
372
373当存在无效输入时,直接返回错误:
374
375```go
376// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4]
377// Input: how, 2.2, are, you -> error
378vals = cfg.Section("").Key("FLOAT64S").StrictFloat64s(",")
379vals = cfg.Section("").Key("INTS").StrictInts(",")
380vals = cfg.Section("").Key("INT64S").StrictInt64s(",")
381vals = cfg.Section("").Key("UINTS").StrictUints(",")
382vals = cfg.Section("").Key("UINT64S").StrictUint64s(",")
383vals = cfg.Section("").Key("TIMES").StrictTimes(",")
384```
385
386### 保存配置
387
388终于到了这个时刻,是时候保存一下配置了。
389
390比较原始的做法是输出配置到某个文件:
391
392```go
393// ...
394err = cfg.SaveTo("my.ini")
395err = cfg.SaveToIndent("my.ini", "\t")
396```
397
398另一个比较高级的做法是写入到任何实现 `io.Writer` 接口的对象中:
399
400```go
401// ...
402cfg.WriteTo(writer)
403cfg.WriteToIndent(writer, "\t")
404```
405
406默认情况下,空格将被用于对齐键值之间的等号以美化输出结果,以下代码可以禁用该功能:
407
408```go
409ini.PrettyFormat = false
410```
411
412## 高级用法
413
414### 递归读取键值
415
416在获取所有键值的过程中,特殊语法 `%(<name>)s` 会被应用,其中 `<name>` 可以是相同分区或者默认分区下的键名。字符串 `%(<name>)s` 会被相应的键值所替代,如果指定的键不存在,则会用空字符串替代。您可以最多使用 99 层的递归嵌套。
417
418```ini
419NAME = ini
420
421[author]
422NAME = Unknwon
423GITHUB = https://github.com/%(NAME)s
424
425[package]
426FULL_NAME = github.com/go-ini/%(NAME)s
427```
428
429```go
430cfg.Section("author").Key("GITHUB").String() // https://github.com/Unknwon
431cfg.Section("package").Key("FULL_NAME").String() // github.com/go-ini/ini
432```
433
434### 读取父子分区
435
436您可以在分区名称中使用 `.` 来表示两个或多个分区之间的父子关系。如果某个键在子分区中不存在,则会去它的父分区中再次寻找,直到没有父分区为止。
437
438```ini
439NAME = ini
440VERSION = v1
441IMPORT_PATH = gopkg.in/%(NAME)s.%(VERSION)s
442
443[package]
444CLONE_URL = https://%(IMPORT_PATH)s
445
446[package.sub]
447```
448
449```go
450cfg.Section("package.sub").Key("CLONE_URL").String() // https://gopkg.in/ini.v1
451```
452
453#### 获取上级父分区下的所有键名
454
455```go
456cfg.Section("package.sub").ParentKeys() // ["CLONE_URL"]
457```
458
459### 无法解析的分区
460
461如果遇到一些比较特殊的分区,它们不包含常见的键值对,而是没有固定格式的纯文本,则可以使用 `LoadOptions.UnparsableSections` 进行处理:
462
463```go
464cfg, err := ini.LoadSources(ini.LoadOptions{UnparseableSections: []string{"COMMENTS"}}, `[COMMENTS]
465<1><L.Slide#2> This slide has the fuel listed in the wrong units <e.1>`)
466
467body := cfg.Section("COMMENTS").Body()
468
469/* --- start ---
470<1><L.Slide#2> This slide has the fuel listed in the wrong units <e.1>
471------ end --- */
472```
473
474### 读取自增键名
475
476如果数据源中的键名为 `-`,则认为该键使用了自增键名的特殊语法。计数器从 1 开始,并且分区之间是相互独立的。
477
478```ini
479[features]
480-: Support read/write comments of keys and sections
481-: Support auto-increment of key names
482-: Support load multiple files to overwrite key values
483```
484
485```go
486cfg.Section("features").KeyStrings() // []{"#1", "#2", "#3"}
487```
488
489### 映射到结构
490
491想要使用更加面向对象的方式玩转 INI 吗?好主意。
492
493```ini
494Name = Unknwon
495age = 21
496Male = true
497Born = 1993-01-01T20:17:05Z
498
499[Note]
500Content = Hi is a good man!
501Cities = HangZhou, Boston
502```
503
504```go
505type Note struct {
506 Content string
507 Cities []string
508}
509
510type Person struct {
511 Name string
512 Age int `ini:"age"`
513 Male bool
514 Born time.Time
515 Note
516 Created time.Time `ini:"-"`
517}
518
519func main() {
520 cfg, err := ini.Load("path/to/ini")
521 // ...
522 p := new(Person)
523 err = cfg.MapTo(p)
524 // ...
525
526 // 一切竟可以如此的简单。
527 err = ini.MapTo(p, "path/to/ini")
528 // ...
529
530 // 嗯哼?只需要映射一个分区吗?
531 n := new(Note)
532 err = cfg.Section("Note").MapTo(n)
533 // ...
534}
535```
536
537结构的字段怎么设置默认值呢?很简单,只要在映射之前对指定字段进行赋值就可以了。如果键未找到或者类型错误,该值不会发生改变。
538
539```go
540// ...
541p := &Person{
542 Name: "Joe",
543}
544// ...
545```
546
547这样玩 INI 真的好酷啊!然而,如果不能还给我原来的配置文件,有什么卵用?
548
549### 从结构反射
550
551可是,我有说不能吗?
552
553```go
554type Embeded struct {
555 Dates []time.Time `delim:"|"`
556 Places []string `ini:"places,omitempty"`
557 None []int `ini:",omitempty"`
558}
559
560type Author struct {
561 Name string `ini:"NAME"`
562 Male bool
563 Age int
564 GPA float64
565 NeverMind string `ini:"-"`
566 *Embeded
567}
568
569func main() {
570 a := &Author{"Unknwon", true, 21, 2.8, "",
571 &Embeded{
572 []time.Time{time.Now(), time.Now()},
573 []string{"HangZhou", "Boston"},
574 []int{},
575 }}
576 cfg := ini.Empty()
577 err = ini.ReflectFrom(cfg, a)
578 // ...
579}
580```
581
582瞧瞧,奇迹发生了。
583
584```ini
585NAME = Unknwon
586Male = true
587Age = 21
588GPA = 2.8
589
590[Embeded]
591Dates = 2015-08-07T22:14:22+08:00|2015-08-07T22:14:22+08:00
592places = HangZhou,Boston
593```
594
595#### 名称映射器(Name Mapper)
596
597为了节省您的时间并简化代码,本库支持类型为 [`NameMapper`](https://gowalker.org/gopkg.in/ini.v1#NameMapper) 的名称映射器,该映射器负责结构字段名与分区名和键名之间的映射。
598
599目前有 2 款内置的映射器:
600
601- `AllCapsUnderscore`:该映射器将字段名转换至格式 `ALL_CAPS_UNDERSCORE` 后再去匹配分区名和键名。
602- `TitleUnderscore`:该映射器将字段名转换至格式 `title_underscore` 后再去匹配分区名和键名。
603
604使用方法:
605
606```go
607type Info struct{
608 PackageName string
609}
610
611func main() {
612 err = ini.MapToWithMapper(&Info{}, ini.TitleUnderscore, []byte("package_name=ini"))
613 // ...
614
615 cfg, err := ini.Load([]byte("PACKAGE_NAME=ini"))
616 // ...
617 info := new(Info)
618 cfg.NameMapper = ini.AllCapsUnderscore
619 err = cfg.MapTo(info)
620 // ...
621}
622```
623
624使用函数 `ini.ReflectFromWithMapper` 时也可应用相同的规则。
625
626#### 值映射器(Value Mapper)
627
628值映射器允许使用一个自定义函数自动展开值的具体内容,例如:运行时获取环境变量:
629
630```go
631type Env struct {
632 Foo string `ini:"foo"`
633}
634
635func main() {
636	cfg, err := ini.Load([]byte("[env]\nfoo = ${MY_VAR}\n"))
637 cfg.ValueMapper = os.ExpandEnv
638 // ...
639 env := &Env{}
640 err = cfg.Section("env").MapTo(env)
641}
642```
643
644本例中,`env.Foo` 将会是运行时所获取到环境变量 `MY_VAR` 的值。
645
646#### 映射/反射的其它说明
647
648任何嵌入的结构都会被默认认作一个不同的分区,并且不会自动产生所谓的父子分区关联:
649
650```go
651type Child struct {
652 Age string
653}
654
655type Parent struct {
656 Name string
657 Child
658}
659
660type Config struct {
661 City string
662 Parent
663}
664```
665
666示例配置文件:
667
668```ini
669City = Boston
670
671[Parent]
672Name = Unknwon
673
674[Child]
675Age = 21
676```
677
678很好,但是,我就是要嵌入结构也在同一个分区。好吧,你爹是李刚!
679
680```go
681type Child struct {
682 Age string
683}
684
685type Parent struct {
686 Name string
687 Child `ini:"Parent"`
688}
689
690type Config struct {
691 City string
692 Parent
693}
694```
695
696示例配置文件:
697
698```ini
699City = Boston
700
701[Parent]
702Name = Unknwon
703Age = 21
704```
705
706## 获取帮助
707
708- [API 文档](https://gowalker.org/gopkg.in/ini.v1)
709- [创建工单](https://github.com/go-ini/ini/issues/new)
710
711## 常见问题
712
713### 字段 `BlockMode` 是什么?
714
715默认情况下,本库会在您进行读写操作时采用锁机制来确保数据时间。但在某些情况下,您非常确定只进行读操作。此时,您可以通过设置 `cfg.BlockMode = false` 来将读操作提升大约 **50-70%** 的性能。
716
717### 为什么要写另一个 INI 解析库?
718
719许多人都在使用我的 [goconfig](https://github.com/Unknwon/goconfig) 来完成对 INI 文件的操作,但我希望使用更加 Go 风格的代码。并且当您设置 `cfg.BlockMode = false` 时,会有大约 **10-30%** 的性能提升。
720
721为了做出这些改变,我必须对 API 进行破坏,所以新开一个仓库是最安全的做法。除此之外,本库直接使用 `gopkg.in` 来进行版本化发布。(其实真相是导入路径更短了)
diff --git a/vendor/github.com/go-ini/ini/error.go b/vendor/github.com/go-ini/ini/error.go
new file mode 100644
index 0000000..80afe74
--- /dev/null
+++ b/vendor/github.com/go-ini/ini/error.go
@@ -0,0 +1,32 @@
1// Copyright 2016 Unknwon
2//
3// Licensed under the Apache License, Version 2.0 (the "License"): you may
4// not use this file except in compliance with the License. You may obtain
5// a copy of the License at
6//
7// http://www.apache.org/licenses/LICENSE-2.0
8//
9// Unless required by applicable law or agreed to in writing, software
10// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
11// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
12// License for the specific language governing permissions and limitations
13// under the License.
14
15package ini
16
17import (
18 "fmt"
19)
20
21type ErrDelimiterNotFound struct {
22 Line string
23}
24
25func IsErrDelimiterNotFound(err error) bool {
26 _, ok := err.(ErrDelimiterNotFound)
27 return ok
28}
29
30func (err ErrDelimiterNotFound) Error() string {
31 return fmt.Sprintf("key-value delimiter not found: %s", err.Line)
32}
diff --git a/vendor/github.com/go-ini/ini/ini.go b/vendor/github.com/go-ini/ini/ini.go
new file mode 100644
index 0000000..77e0dbd
--- /dev/null
+++ b/vendor/github.com/go-ini/ini/ini.go
@@ -0,0 +1,535 @@
1// Copyright 2014 Unknwon
2//
3// Licensed under the Apache License, Version 2.0 (the "License"): you may
4// not use this file except in compliance with the License. You may obtain
5// a copy of the License at
6//
7// http://www.apache.org/licenses/LICENSE-2.0
8//
9// Unless required by applicable law or agreed to in writing, software
10// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
11// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
12// License for the specific language governing permissions and limitations
13// under the License.
14
15// Package ini provides INI file read and write functionality in Go.
16package ini
17
18import (
19 "bytes"
20 "errors"
21 "fmt"
22 "io"
23 "io/ioutil"
24 "os"
25 "regexp"
26 "runtime"
27 "strconv"
28 "strings"
29 "sync"
30 "time"
31)
32
33const (
34 // Name for the default section. You can use this constant or the string literal.
35 // In most cases, an empty string is all you need to access the section.
36 DEFAULT_SECTION = "DEFAULT"
37
38 // Maximum allowed depth when recursively substituting variable names.
39 _DEPTH_VALUES = 99
40 _VERSION = "1.23.1"
41)
42
43// Version returns current package version literal.
44func Version() string {
45 return _VERSION
46}
47
48var (
49 // Delimiter to determine or compose a new line.
50 // This variable will be changed to "\r\n" automatically on Windows
51 // at package init time.
52 LineBreak = "\n"
53
54 // Variable regexp pattern: %(variable)s
55 varPattern = regexp.MustCompile(`%\(([^\)]+)\)s`)
56
57 // Indicate whether to align "=" sign with spaces to produce pretty output
58 // or reduce all possible spaces for compact format.
59 PrettyFormat = true
60
61 // Explicitly write DEFAULT section header
62 DefaultHeader = false
63)
64
65func init() {
66 if runtime.GOOS == "windows" {
67 LineBreak = "\r\n"
68 }
69}
70
71func inSlice(str string, s []string) bool {
72 for _, v := range s {
73 if str == v {
74 return true
75 }
76 }
77 return false
78}
79
80// dataSource is an interface that returns an object which can be read and closed.
81type dataSource interface {
82 ReadCloser() (io.ReadCloser, error)
83}
84
85// sourceFile represents an object that contains content on the local file system.
86type sourceFile struct {
87 name string
88}
89
90func (s sourceFile) ReadCloser() (_ io.ReadCloser, err error) {
91 return os.Open(s.name)
92}
93
94type bytesReadCloser struct {
95 reader io.Reader
96}
97
98func (rc *bytesReadCloser) Read(p []byte) (n int, err error) {
99 return rc.reader.Read(p)
100}
101
102func (rc *bytesReadCloser) Close() error {
103 return nil
104}
105
106// sourceData represents an object that contains content in memory.
107type sourceData struct {
108 data []byte
109}
110
111func (s *sourceData) ReadCloser() (io.ReadCloser, error) {
112 return ioutil.NopCloser(bytes.NewReader(s.data)), nil
113}
114
115// sourceReadCloser represents an input stream with Close method.
116type sourceReadCloser struct {
117 reader io.ReadCloser
118}
119
120func (s *sourceReadCloser) ReadCloser() (io.ReadCloser, error) {
121 return s.reader, nil
122}
123
124// File represents a combination of one or more INI files in memory.
125type File struct {
126 // Should make things safe, but sometimes doesn't matter.
127 BlockMode bool
128 // Make sure data is safe in multiple goroutines.
129 lock sync.RWMutex
130
131 // Allow combination of multiple data sources.
132 dataSources []dataSource
133 // Actual data is stored here.
134 sections map[string]*Section
135
136 // To keep data in order.
137 sectionList []string
138
139 options LoadOptions
140
141 NameMapper
142 ValueMapper
143}
144
145// newFile initializes File object with given data sources.
146func newFile(dataSources []dataSource, opts LoadOptions) *File {
147 return &File{
148 BlockMode: true,
149 dataSources: dataSources,
150 sections: make(map[string]*Section),
151 sectionList: make([]string, 0, 10),
152 options: opts,
153 }
154}
155
156func parseDataSource(source interface{}) (dataSource, error) {
157 switch s := source.(type) {
158 case string:
159 return sourceFile{s}, nil
160 case []byte:
161 return &sourceData{s}, nil
162 case io.ReadCloser:
163 return &sourceReadCloser{s}, nil
164 default:
165 return nil, fmt.Errorf("error parsing data source: unknown type '%s'", s)
166 }
167}
168
// LoadOptions contains customized options used while loading from data source(s).
169type LoadOptions struct {
170 // Loose indicates whether the parser should ignore nonexistent files or return error.
171 Loose bool
172 // Insensitive indicates whether the parser forces all section and key names to lowercase.
173 Insensitive bool
174 // IgnoreContinuation indicates whether to ignore continuation lines while parsing.
175 IgnoreContinuation bool
176 // AllowBooleanKeys indicates whether to allow boolean-type keys or to treat them as keys with missing values.
177 // This type of key is mostly used in my.cnf.
178 AllowBooleanKeys bool
179 // Some INI formats allow group blocks that store a block of raw content that doesn't otherwise
180 // conform to key/value pairs. Specify the names of those blocks here.
181 UnparseableSections []string
182}
183
// LoadSources allows the caller to apply customized options while loading from data source(s).
184func LoadSources(opts LoadOptions, source interface{}, others ...interface{}) (_ *File, err error) {
185 sources := make([]dataSource, len(others)+1)
186 sources[0], err = parseDataSource(source)
187 if err != nil {
188 return nil, err
189 }
190 for i := range others {
191 sources[i+1], err = parseDataSource(others[i])
192 if err != nil {
193 return nil, err
194 }
195 }
196 f := newFile(sources, opts)
197 if err = f.Reload(); err != nil {
198 return nil, err
199 }
200 return f, nil
201}
202
203// Load loads and parses from INI data sources.
204// Arguments can be a mix of file names (string), raw data ([]byte), or io.ReadCloser.
205// It will return an error if the list contains nonexistent files.
206func Load(source interface{}, others ...interface{}) (*File, error) {
207 return LoadSources(LoadOptions{}, source, others...)
208}
209
210// LooseLoad has the exact same functionality as the Load function,
211// except it ignores nonexistent files instead of returning an error.
212func LooseLoad(source interface{}, others ...interface{}) (*File, error) {
213 return LoadSources(LoadOptions{Loose: true}, source, others...)
214}
215
216// InsensitiveLoad has the exact same functionality as the Load function,
217// except it forces all section and key names to be lowercased.
218func InsensitiveLoad(source interface{}, others ...interface{}) (*File, error) {
219 return LoadSources(LoadOptions{Insensitive: true}, source, others...)
220}
221
222// Empty returns an empty file object.
223func Empty() *File {
224 // Ignore error here; we're sure our data is good.
225 f, _ := Load([]byte(""))
226 return f
227}
228
229// NewSection creates a new section.
230func (f *File) NewSection(name string) (*Section, error) {
231 if len(name) == 0 {
232 return nil, errors.New("error creating new section: empty section name")
233 } else if f.options.Insensitive && name != DEFAULT_SECTION {
234 name = strings.ToLower(name)
235 }
236
237 if f.BlockMode {
238 f.lock.Lock()
239 defer f.lock.Unlock()
240 }
241
242 if inSlice(name, f.sectionList) {
243 return f.sections[name], nil
244 }
245
246 f.sectionList = append(f.sectionList, name)
247 f.sections[name] = newSection(f, name)
248 return f.sections[name], nil
249}
250
251// NewRawSection creates a new section with an unparseable body.
252func (f *File) NewRawSection(name, body string) (*Section, error) {
253 section, err := f.NewSection(name)
254 if err != nil {
255 return nil, err
256 }
257
258 section.isRawSection = true
259 section.rawBody = body
260 return section, nil
261}
262
263// NewSections creates a list of sections.
264func (f *File) NewSections(names ...string) (err error) {
265 for _, name := range names {
266 if _, err = f.NewSection(name); err != nil {
267 return err
268 }
269 }
270 return nil
271}
272
273// GetSection returns section by given name.
274func (f *File) GetSection(name string) (*Section, error) {
275 if len(name) == 0 {
276 name = DEFAULT_SECTION
277 } else if f.options.Insensitive {
278 name = strings.ToLower(name)
279 }
280
281 if f.BlockMode {
282 f.lock.RLock()
283 defer f.lock.RUnlock()
284 }
285
286 sec := f.sections[name]
287 if sec == nil {
288 return nil, fmt.Errorf("section '%s' does not exist", name)
289 }
290 return sec, nil
291}
292
293// Section assumes the named section exists and creates and returns a new one when it does not.
294func (f *File) Section(name string) *Section {
295 sec, err := f.GetSection(name)
296 if err != nil {
297 // Note: It's OK here because the only possible error is empty section name,
298 // but if it's empty, this piece of code won't be executed.
299 sec, _ = f.NewSection(name)
300 return sec
301 }
302 return sec
303}
304
305// Sections returns the list of sections.
306func (f *File) Sections() []*Section {
307 sections := make([]*Section, len(f.sectionList))
308 for i := range f.sectionList {
309 sections[i] = f.Section(f.sectionList[i])
310 }
311 return sections
312}
313
314// SectionStrings returns list of section names.
315func (f *File) SectionStrings() []string {
316 list := make([]string, len(f.sectionList))
317 copy(list, f.sectionList)
318 return list
319}
320
321// DeleteSection deletes a section.
322func (f *File) DeleteSection(name string) {
323 if f.BlockMode {
324 f.lock.Lock()
325 defer f.lock.Unlock()
326 }
327
328 if len(name) == 0 {
329 name = DEFAULT_SECTION
330 }
331
332 for i, s := range f.sectionList {
333 if s == name {
334 f.sectionList = append(f.sectionList[:i], f.sectionList[i+1:]...)
335 delete(f.sections, name)
336 return
337 }
338 }
339}
340
341func (f *File) reload(s dataSource) error {
342 r, err := s.ReadCloser()
343 if err != nil {
344 return err
345 }
346 defer r.Close()
347
348 return f.parse(r)
349}
350
351// Reload reloads and parses all data sources.
352func (f *File) Reload() (err error) {
353 for _, s := range f.dataSources {
354 if err = f.reload(s); err != nil {
355 // In loose mode, we create an empty default section for nonexistent files.
356 if os.IsNotExist(err) && f.options.Loose {
357 f.parse(bytes.NewBuffer(nil))
358 continue
359 }
360 return err
361 }
362 }
363 return nil
364}
365
366// Append appends one or more data sources and reloads automatically.
367func (f *File) Append(source interface{}, others ...interface{}) error {
368 ds, err := parseDataSource(source)
369 if err != nil {
370 return err
371 }
372 f.dataSources = append(f.dataSources, ds)
373 for _, s := range others {
374 ds, err = parseDataSource(s)
375 if err != nil {
376 return err
377 }
378 f.dataSources = append(f.dataSources, ds)
379 }
380 return f.Reload()
381}
382
383// WriteToIndent writes content into an io.Writer with the given indentation.
384// If PrettyFormat has been set to true,
385// it will align the "=" sign with spaces under each section.
386func (f *File) WriteToIndent(w io.Writer, indent string) (n int64, err error) {
387 equalSign := "="
388 if PrettyFormat {
389 equalSign = " = "
390 }
391
392 // Use a buffer to make sure the target is untouched until encoding finishes.
393 buf := bytes.NewBuffer(nil)
394 for i, sname := range f.sectionList {
395 sec := f.Section(sname)
396 if len(sec.Comment) > 0 {
397 if sec.Comment[0] != '#' && sec.Comment[0] != ';' {
398 sec.Comment = "; " + sec.Comment
399 }
400 if _, err = buf.WriteString(sec.Comment + LineBreak); err != nil {
401 return 0, err
402 }
403 }
404
405 if i > 0 || DefaultHeader {
406 if _, err = buf.WriteString("[" + sname + "]" + LineBreak); err != nil {
407 return 0, err
408 }
409 } else {
410 // Write nothing if default section is empty
411 if len(sec.keyList) == 0 {
412 continue
413 }
414 }
415
416 if sec.isRawSection {
417 if _, err = buf.WriteString(sec.rawBody); err != nil {
418 return 0, err
419 }
420 continue
421 }
422
423 // Count and generate alignment length and buffer spaces using the
424 // longest key. Keys may be modified if they contain certain characters, so
425 // we need to take that into account in our calculation.
426 alignLength := 0
427 if PrettyFormat {
428 for _, kname := range sec.keyList {
429 keyLength := len(kname)
430 // First case will surround key by ` and second by """
431 if strings.ContainsAny(kname, "\"=:") {
432 keyLength += 2
433 } else if strings.Contains(kname, "`") {
434 keyLength += 6
435 }
436
437 if keyLength > alignLength {
438 alignLength = keyLength
439 }
440 }
441 }
442 alignSpaces := bytes.Repeat([]byte(" "), alignLength)
443
444 for _, kname := range sec.keyList {
445 key := sec.Key(kname)
446 if len(key.Comment) > 0 {
447 if len(indent) > 0 && sname != DEFAULT_SECTION {
448 buf.WriteString(indent)
449 }
450 if key.Comment[0] != '#' && key.Comment[0] != ';' {
451 key.Comment = "; " + key.Comment
452 }
453 if _, err = buf.WriteString(key.Comment + LineBreak); err != nil {
454 return 0, err
455 }
456 }
457
458 if len(indent) > 0 && sname != DEFAULT_SECTION {
459 buf.WriteString(indent)
460 }
461
462 switch {
463 case key.isAutoIncrement:
464 kname = "-"
465 case strings.ContainsAny(kname, "\"=:"):
466 kname = "`" + kname + "`"
467 case strings.Contains(kname, "`"):
468 kname = `"""` + kname + `"""`
469 }
470 if _, err = buf.WriteString(kname); err != nil {
471 return 0, err
472 }
473
474 if key.isBooleanType {
475 continue
476 }
477
478 // Write out alignment spaces before "=" sign
479 if PrettyFormat {
480 buf.Write(alignSpaces[:alignLength-len(kname)])
481 }
482
483 val := key.value
484 // In case key value contains "\n", "`", "\"", "#" or ";"
485 if strings.ContainsAny(val, "\n`") {
486 val = `"""` + val + `"""`
487 } else if strings.ContainsAny(val, "#;") {
488 val = "`" + val + "`"
489 }
490 if _, err = buf.WriteString(equalSign + val + LineBreak); err != nil {
491 return 0, err
492 }
493 }
494
495 // Put a line between sections
496 if _, err = buf.WriteString(LineBreak); err != nil {
497 return 0, err
498 }
499 }
500
501 return buf.WriteTo(w)
502}
503
504// WriteTo writes file content into io.Writer.
505func (f *File) WriteTo(w io.Writer) (int64, error) {
506 return f.WriteToIndent(w, "")
507}
508
509// SaveToIndent writes content to the file system with the given value indentation.
510func (f *File) SaveToIndent(filename, indent string) error {
511 // Note: Because os.Create truncates the target file,
512 // it's safer to save to a temporary location and rename after we're done.
513 tmpPath := filename + "." + strconv.Itoa(time.Now().Nanosecond()) + ".tmp"
514 defer os.Remove(tmpPath)
515
516 fw, err := os.Create(tmpPath)
517 if err != nil {
518 return err
519 }
520
521 if _, err = f.WriteToIndent(fw, indent); err != nil {
522 fw.Close()
523 return err
524 }
525 fw.Close()
526
527 // Remove old file and rename the new one.
528 os.Remove(filename)
529 return os.Rename(tmpPath, filename)
530}
531
532// SaveTo writes content to file system.
533func (f *File) SaveTo(filename string) error {
534 return f.SaveToIndent(filename, "")
535}
diff --git a/vendor/github.com/go-ini/ini/key.go b/vendor/github.com/go-ini/ini/key.go
new file mode 100644
index 0000000..9738c55
--- /dev/null
+++ b/vendor/github.com/go-ini/ini/key.go
@@ -0,0 +1,633 @@
1// Copyright 2014 Unknwon
2//
3// Licensed under the Apache License, Version 2.0 (the "License"): you may
4// not use this file except in compliance with the License. You may obtain
5// a copy of the License at
6//
7// http://www.apache.org/licenses/LICENSE-2.0
8//
9// Unless required by applicable law or agreed to in writing, software
10// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
11// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
12// License for the specific language governing permissions and limitations
13// under the License.
14
15package ini
16
17import (
18 "fmt"
19 "strconv"
20 "strings"
21 "time"
22)
23
24// Key represents a key under a section.
25type Key struct {
26 s *Section
27 name string
28 value string
29 isAutoIncrement bool
30 isBooleanType bool
31
32 Comment string
33}
34
35// ValueMapper represents a mapping function for values, e.g. os.ExpandEnv
36type ValueMapper func(string) string
37
38// Name returns name of key.
39func (k *Key) Name() string {
40 return k.name
41}
42
43// Value returns the raw value of the key for performance purposes.
44func (k *Key) Value() string {
45 return k.value
46}
47
48// String returns string representation of value.
49func (k *Key) String() string {
50 val := k.value
51 if k.s.f.ValueMapper != nil {
52 val = k.s.f.ValueMapper(val)
53 }
54 if strings.Index(val, "%") == -1 {
55 return val
56 }
57
58 for i := 0; i < _DEPTH_VALUES; i++ {
59 vr := varPattern.FindString(val)
60 if len(vr) == 0 {
61 break
62 }
63
64 // Take off leading '%(' and trailing ')s'.
65 noption := strings.TrimLeft(vr, "%(")
66 noption = strings.TrimRight(noption, ")s")
67
68 // Search in the same section.
69 nk, err := k.s.GetKey(noption)
70 if err != nil {
71 // Search again in default section.
72 nk, _ = k.s.f.Section("").GetKey(noption)
73 }
74
75 // Substitute the variable reference with the new value.
76 val = strings.Replace(val, vr, nk.value, -1)
77 }
78 return val
79}
80
81// Validate accepts a validation function which can
82// return a modified result as the key value.
83func (k *Key) Validate(fn func(string) string) string {
84 return fn(k.String())
85}
86
87// parseBool returns the boolean value represented by the string.
88//
89// It accepts 1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On,
90// 0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off.
91// Any other value returns an error.
92func parseBool(str string) (value bool, err error) {
93 switch str {
94 case "1", "t", "T", "true", "TRUE", "True", "YES", "yes", "Yes", "y", "ON", "on", "On":
95 return true, nil
96 case "0", "f", "F", "false", "FALSE", "False", "NO", "no", "No", "n", "OFF", "off", "Off":
97 return false, nil
98 }
99 return false, fmt.Errorf("parsing \"%s\": invalid syntax", str)
100}
101
102// Bool returns bool type value.
103func (k *Key) Bool() (bool, error) {
104 return parseBool(k.String())
105}
106
107// Float64 returns float64 type value.
108func (k *Key) Float64() (float64, error) {
109 return strconv.ParseFloat(k.String(), 64)
110}
111
112// Int returns int type value.
113func (k *Key) Int() (int, error) {
114 return strconv.Atoi(k.String())
115}
116
117// Int64 returns int64 type value.
118func (k *Key) Int64() (int64, error) {
119 return strconv.ParseInt(k.String(), 10, 64)
120}
121
122// Uint returns uint type value.
123func (k *Key) Uint() (uint, error) {
124 u, e := strconv.ParseUint(k.String(), 10, 64)
125 return uint(u), e
126}
127
128// Uint64 returns uint64 type value.
129func (k *Key) Uint64() (uint64, error) {
130 return strconv.ParseUint(k.String(), 10, 64)
131}
132
133// Duration returns time.Duration type value.
134func (k *Key) Duration() (time.Duration, error) {
135 return time.ParseDuration(k.String())
136}
137
138// TimeFormat parses with given format and returns time.Time type value.
139func (k *Key) TimeFormat(format string) (time.Time, error) {
140 return time.Parse(format, k.String())
141}
142
143// Time parses with RFC3339 format and returns time.Time type value.
144func (k *Key) Time() (time.Time, error) {
145 return k.TimeFormat(time.RFC3339)
146}
147
148// MustString returns default value if key value is empty.
149func (k *Key) MustString(defaultVal string) string {
150 val := k.String()
151 if len(val) == 0 {
152 k.value = defaultVal
153 return defaultVal
154 }
155 return val
156}
157
158// MustBool always returns a value without error;
159// it returns false if an error occurs.
160func (k *Key) MustBool(defaultVal ...bool) bool {
161 val, err := k.Bool()
162 if len(defaultVal) > 0 && err != nil {
163 k.value = strconv.FormatBool(defaultVal[0])
164 return defaultVal[0]
165 }
166 return val
167}
168
169// MustFloat64 always returns a value without error;
170// it returns 0.0 if an error occurs.
171func (k *Key) MustFloat64(defaultVal ...float64) float64 {
172 val, err := k.Float64()
173 if len(defaultVal) > 0 && err != nil {
174 k.value = strconv.FormatFloat(defaultVal[0], 'f', -1, 64)
175 return defaultVal[0]
176 }
177 return val
178}
179
180// MustInt always returns a value without error;
181// it returns 0 if an error occurs.
182func (k *Key) MustInt(defaultVal ...int) int {
183 val, err := k.Int()
184 if len(defaultVal) > 0 && err != nil {
185 k.value = strconv.FormatInt(int64(defaultVal[0]), 10)
186 return defaultVal[0]
187 }
188 return val
189}
190
191// MustInt64 always returns a value without error;
192// it returns 0 if an error occurs.
193func (k *Key) MustInt64(defaultVal ...int64) int64 {
194 val, err := k.Int64()
195 if len(defaultVal) > 0 && err != nil {
196 k.value = strconv.FormatInt(defaultVal[0], 10)
197 return defaultVal[0]
198 }
199 return val
200}
201
202// MustUint always returns a value without error;
203// it returns 0 if an error occurs.
204func (k *Key) MustUint(defaultVal ...uint) uint {
205 val, err := k.Uint()
206 if len(defaultVal) > 0 && err != nil {
207 k.value = strconv.FormatUint(uint64(defaultVal[0]), 10)
208 return defaultVal[0]
209 }
210 return val
211}
212
213// MustUint64 always returns a value without error;
214// it returns 0 if an error occurs.
215func (k *Key) MustUint64(defaultVal ...uint64) uint64 {
216 val, err := k.Uint64()
217 if len(defaultVal) > 0 && err != nil {
218 k.value = strconv.FormatUint(defaultVal[0], 10)
219 return defaultVal[0]
220 }
221 return val
222}
223
224// MustDuration always returns a value without error;
225// it returns the zero value if an error occurs.
226func (k *Key) MustDuration(defaultVal ...time.Duration) time.Duration {
227 val, err := k.Duration()
228 if len(defaultVal) > 0 && err != nil {
229 k.value = defaultVal[0].String()
230 return defaultVal[0]
231 }
232 return val
233}
234
235// MustTimeFormat always parses with the given format and returns a value without error;
236// it returns the zero value if an error occurs.
237func (k *Key) MustTimeFormat(format string, defaultVal ...time.Time) time.Time {
238 val, err := k.TimeFormat(format)
239 if len(defaultVal) > 0 && err != nil {
240 k.value = defaultVal[0].Format(format)
241 return defaultVal[0]
242 }
243 return val
244}
245
246// MustTime always parses with the RFC3339 format and returns a value without error;
247// it returns the zero value if an error occurs.
248func (k *Key) MustTime(defaultVal ...time.Time) time.Time {
249 return k.MustTimeFormat(time.RFC3339, defaultVal...)
250}
251
252// In always returns a value without error;
253// it returns the default value if an error occurs or the value is not among the candidates.
254func (k *Key) In(defaultVal string, candidates []string) string {
255 val := k.String()
256 for _, cand := range candidates {
257 if val == cand {
258 return val
259 }
260 }
261 return defaultVal
262}
263
264// InFloat64 always returns a value without error;
265// it returns the default value if an error occurs or the value is not among the candidates.
266func (k *Key) InFloat64(defaultVal float64, candidates []float64) float64 {
267 val := k.MustFloat64()
268 for _, cand := range candidates {
269 if val == cand {
270 return val
271 }
272 }
273 return defaultVal
274}
275
276// InInt always returns a value without error;
277// it returns the default value if an error occurs or the value is not among the candidates.
278func (k *Key) InInt(defaultVal int, candidates []int) int {
279 val := k.MustInt()
280 for _, cand := range candidates {
281 if val == cand {
282 return val
283 }
284 }
285 return defaultVal
286}
287
288// InInt64 always returns a value without error;
289// it returns the default value if an error occurs or the value is not among the candidates.
290func (k *Key) InInt64(defaultVal int64, candidates []int64) int64 {
291 val := k.MustInt64()
292 for _, cand := range candidates {
293 if val == cand {
294 return val
295 }
296 }
297 return defaultVal
298}
299
300// InUint always returns a value without error;
301// it returns the default value if an error occurs or the value is not among the candidates.
302func (k *Key) InUint(defaultVal uint, candidates []uint) uint {
303 val := k.MustUint()
304 for _, cand := range candidates {
305 if val == cand {
306 return val
307 }
308 }
309 return defaultVal
310}
311
312// InUint64 always returns a value without error;
313// it returns the default value if an error occurs or the value is not among the candidates.
314func (k *Key) InUint64(defaultVal uint64, candidates []uint64) uint64 {
315 val := k.MustUint64()
316 for _, cand := range candidates {
317 if val == cand {
318 return val
319 }
320 }
321 return defaultVal
322}
323
324// InTimeFormat always parses with the given format and returns a value without error;
325// it returns the default value if an error occurs or the value is not among the candidates.
326func (k *Key) InTimeFormat(format string, defaultVal time.Time, candidates []time.Time) time.Time {
327 val := k.MustTimeFormat(format)
328 for _, cand := range candidates {
329 if val == cand {
330 return val
331 }
332 }
333 return defaultVal
334}
335
336// InTime always parses with the RFC3339 format and returns a value without error;
337// it returns the default value if an error occurs or the value is not among the candidates.
338func (k *Key) InTime(defaultVal time.Time, candidates []time.Time) time.Time {
339 return k.InTimeFormat(time.RFC3339, defaultVal, candidates)
340}
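// Illustrative sketch (not part of the vendored source) of the In* helpers,
// using a hypothetical *File value cfg and a hypothetical "level" key:
//
//	// Falls back to "info" unless the raw value is one of the candidates.
//	level := cfg.Section("log").Key("level").In("info", []string{"debug", "info", "warn"})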
341
342// RangeFloat64 checks if value is in given range inclusively,
343// and returns default value if it's not.
344func (k *Key) RangeFloat64(defaultVal, min, max float64) float64 {
345 val := k.MustFloat64()
346 if val < min || val > max {
347 return defaultVal
348 }
349 return val
350}
351
352// RangeInt checks if value is in given range inclusively,
353// and returns default value if it's not.
354func (k *Key) RangeInt(defaultVal, min, max int) int {
355 val := k.MustInt()
356 if val < min || val > max {
357 return defaultVal
358 }
359 return val
360}
361
362// RangeInt64 checks if value is in given range inclusively,
363// and returns default value if it's not.
364func (k *Key) RangeInt64(defaultVal, min, max int64) int64 {
365 val := k.MustInt64()
366 if val < min || val > max {
367 return defaultVal
368 }
369 return val
370}
371
372// RangeTimeFormat checks if value with given format is in given range inclusively,
373// and returns default value if it's not.
374func (k *Key) RangeTimeFormat(format string, defaultVal, min, max time.Time) time.Time {
375 val := k.MustTimeFormat(format)
376 if val.Unix() < min.Unix() || val.Unix() > max.Unix() {
377 return defaultVal
378 }
379 return val
380}
381
382// RangeTime checks if value with RFC3339 format is in given range inclusively,
383// and returns default value if it's not.
384func (k *Key) RangeTime(defaultVal, min, max time.Time) time.Time {
385 return k.RangeTimeFormat(time.RFC3339, defaultVal, min, max)
386}
387
388// Strings returns a list of strings separated by the given delimiter.
389func (k *Key) Strings(delim string) []string {
390 str := k.String()
391 if len(str) == 0 {
392 return []string{}
393 }
394
395 vals := strings.Split(str, delim)
396 for i := range vals {
397 vals[i] = strings.TrimSpace(vals[i])
398 }
399 return vals
400}
401
402// Float64s returns a list of float64 values separated by the given delimiter. Any invalid input will be treated as the zero value.
403func (k *Key) Float64s(delim string) []float64 {
404 vals, _ := k.getFloat64s(delim, true, false)
405 return vals
406}
407
408// Ints returns a list of int values separated by the given delimiter. Any invalid input will be treated as the zero value.
409func (k *Key) Ints(delim string) []int {
410 vals, _ := k.getInts(delim, true, false)
411 return vals
412}
413
414// Int64s returns a list of int64 values separated by the given delimiter. Any invalid input will be treated as the zero value.
415func (k *Key) Int64s(delim string) []int64 {
416 vals, _ := k.getInt64s(delim, true, false)
417 return vals
418}
419
420// Uints returns a list of uint values separated by the given delimiter. Any invalid input will be treated as the zero value.
421func (k *Key) Uints(delim string) []uint {
422 vals, _ := k.getUints(delim, true, false)
423 return vals
424}
425
426// Uint64s returns a list of uint64 values separated by the given delimiter. Any invalid input will be treated as the zero value.
427func (k *Key) Uint64s(delim string) []uint64 {
428 vals, _ := k.getUint64s(delim, true, false)
429 return vals
430}
431
432// TimesFormat parses with the given format and returns a list of time.Time values separated by the given delimiter.
433// Any invalid input will be treated as the zero value (0001-01-01 00:00:00 +0000 UTC).
434func (k *Key) TimesFormat(format, delim string) []time.Time {
435 vals, _ := k.getTimesFormat(format, delim, true, false)
436 return vals
437}
438
439// Times parses with the RFC3339 format and returns a list of time.Time values separated by the given delimiter.
440// Any invalid input will be treated as the zero value (0001-01-01 00:00:00 +0000 UTC).
441func (k *Key) Times(delim string) []time.Time {
442 return k.TimesFormat(time.RFC3339, delim)
443}
444
445// ValidFloat64s returns a list of float64 values separated by the given delimiter. Values that
446// cannot be parsed as floats are not included in the result.
447func (k *Key) ValidFloat64s(delim string) []float64 {
448 vals, _ := k.getFloat64s(delim, false, false)
449 return vals
450}
451
452// ValidInts returns a list of int values separated by the given delimiter. Values that cannot be
453// parsed as integers are not included in the result.
454func (k *Key) ValidInts(delim string) []int {
455 vals, _ := k.getInts(delim, false, false)
456 return vals
457}
458
459// ValidInt64s returns a list of int64 values separated by the given delimiter. Values that cannot
460// be parsed as 64-bit integers are not included in the result.
461func (k *Key) ValidInt64s(delim string) []int64 {
462 vals, _ := k.getInt64s(delim, false, false)
463 return vals
464}
465
466// ValidUints returns a list of uint values separated by the given delimiter. Values that cannot
467// be parsed as unsigned integers are not included in the result.
468func (k *Key) ValidUints(delim string) []uint {
469 vals, _ := k.getUints(delim, false, false)
470 return vals
471}
472
473// ValidUint64s returns a list of uint64 values separated by the given delimiter. Values that cannot
474// be parsed as 64-bit unsigned integers are not included in the result.
475func (k *Key) ValidUint64s(delim string) []uint64 {
476 vals, _ := k.getUint64s(delim, false, false)
477 return vals
478}
479
480// ValidTimesFormat parses with the given format and returns a list of time.Time values separated by the given delimiter.
481func (k *Key) ValidTimesFormat(format, delim string) []time.Time {
482 vals, _ := k.getTimesFormat(format, delim, false, false)
483 return vals
484}
485
486// ValidTimes parses with the RFC3339 format and returns a list of time.Time values separated by the given delimiter.
487func (k *Key) ValidTimes(delim string) []time.Time {
488 return k.ValidTimesFormat(time.RFC3339, delim)
489}
490
491// StrictFloat64s returns a list of float64 values separated by the given delimiter, or an error on the first invalid input.
492func (k *Key) StrictFloat64s(delim string) ([]float64, error) {
493 return k.getFloat64s(delim, false, true)
494}
495
496// StrictInts returns a list of int values separated by the given delimiter, or an error on the first invalid input.
497func (k *Key) StrictInts(delim string) ([]int, error) {
498 return k.getInts(delim, false, true)
499}
500
501// StrictInt64s returns a list of int64 values separated by the given delimiter, or an error on the first invalid input.
502func (k *Key) StrictInt64s(delim string) ([]int64, error) {
503 return k.getInt64s(delim, false, true)
504}
505
506// StrictUints returns a list of uint values separated by the given delimiter, or an error on the first invalid input.
507func (k *Key) StrictUints(delim string) ([]uint, error) {
508 return k.getUints(delim, false, true)
509}
510
511// StrictUint64s returns a list of uint64 values separated by the given delimiter, or an error on the first invalid input.
512func (k *Key) StrictUint64s(delim string) ([]uint64, error) {
513 return k.getUint64s(delim, false, true)
514}
515
516// StrictTimesFormat parses with the given format and returns a list of time.Time values separated by the
517// given delimiter, or an error on the first invalid input.
518func (k *Key) StrictTimesFormat(format, delim string) ([]time.Time, error) {
519 return k.getTimesFormat(format, delim, false, true)
520}
521
522// StrictTimes parses with the RFC3339 format and returns a list of time.Time values separated by the
523// given delimiter, or an error on the first invalid input.
524func (k *Key) StrictTimes(delim string) ([]time.Time, error) {
525 return k.StrictTimesFormat(time.RFC3339, delim)
526}
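// Illustrative sketch (not part of the vendored source) contrasting the three
// list behaviors above on the same hypothetical key of a hypothetical cfg:
//
//	// ports = 80, 443, oops
//	k := cfg.Section("").Key("ports")
//	k.Ints(",")       // [80 443 0]: invalid input becomes the zero value
//	k.ValidInts(",")  // [80 443]: invalid input is dropped
//	k.StrictInts(",") // nil, error: fails on the first invalid input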
527
528// getFloat64s returns a list of float64 values separated by the given delimiter.
529func (k *Key) getFloat64s(delim string, addInvalid, returnOnInvalid bool) ([]float64, error) {
530 strs := k.Strings(delim)
531 vals := make([]float64, 0, len(strs))
532 for _, str := range strs {
533 val, err := strconv.ParseFloat(str, 64)
534 if err != nil && returnOnInvalid {
535 return nil, err
536 }
537 if err == nil || addInvalid {
538 vals = append(vals, val)
539 }
540 }
541 return vals, nil
542}
543
544// getInts returns a list of int values separated by the given delimiter.
545func (k *Key) getInts(delim string, addInvalid, returnOnInvalid bool) ([]int, error) {
546 strs := k.Strings(delim)
547 vals := make([]int, 0, len(strs))
548 for _, str := range strs {
549 val, err := strconv.Atoi(str)
550 if err != nil && returnOnInvalid {
551 return nil, err
552 }
553 if err == nil || addInvalid {
554 vals = append(vals, val)
555 }
556 }
557 return vals, nil
558}
559
560// getInt64s returns a list of int64 values separated by the given delimiter.
561func (k *Key) getInt64s(delim string, addInvalid, returnOnInvalid bool) ([]int64, error) {
562 strs := k.Strings(delim)
563 vals := make([]int64, 0, len(strs))
564 for _, str := range strs {
565 val, err := strconv.ParseInt(str, 10, 64)
566 if err != nil && returnOnInvalid {
567 return nil, err
568 }
569 if err == nil || addInvalid {
570 vals = append(vals, val)
571 }
572 }
573 return vals, nil
574}
575
576// getUints returns a list of uint values separated by the given delimiter.
577func (k *Key) getUints(delim string, addInvalid, returnOnInvalid bool) ([]uint, error) {
578 strs := k.Strings(delim)
579 vals := make([]uint, 0, len(strs))
580 for _, str := range strs {
581 val, err := strconv.ParseUint(str, 10, 0)
582 if err != nil && returnOnInvalid {
583 return nil, err
584 }
585 if err == nil || addInvalid {
586 vals = append(vals, uint(val))
587 }
588 }
589 return vals, nil
590}
591
592// getUint64s returns a list of uint64 values separated by the given delimiter.
593func (k *Key) getUint64s(delim string, addInvalid, returnOnInvalid bool) ([]uint64, error) {
594 strs := k.Strings(delim)
595 vals := make([]uint64, 0, len(strs))
596 for _, str := range strs {
597 val, err := strconv.ParseUint(str, 10, 64)
598 if err != nil && returnOnInvalid {
599 return nil, err
600 }
601 if err == nil || addInvalid {
602 vals = append(vals, val)
603 }
604 }
605 return vals, nil
606}
607
608// getTimesFormat parses with the given format and returns a list of time.Time values separated by the given delimiter.
609func (k *Key) getTimesFormat(format, delim string, addInvalid, returnOnInvalid bool) ([]time.Time, error) {
610 strs := k.Strings(delim)
611 vals := make([]time.Time, 0, len(strs))
612 for _, str := range strs {
613 val, err := time.Parse(format, str)
614 if err != nil && returnOnInvalid {
615 return nil, err
616 }
617 if err == nil || addInvalid {
618 vals = append(vals, val)
619 }
620 }
621 return vals, nil
622}
623
624// SetValue changes key value.
625func (k *Key) SetValue(v string) {
626 if k.s.f.BlockMode {
627 k.s.f.lock.Lock()
628 defer k.s.f.lock.Unlock()
629 }
630
631 k.value = v
632 k.s.keysHash[k.name] = v
633}
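// Illustrative sketch (not part of the vendored source) of the recursive
// %(name)s substitution implemented in Key.String above; lookups fall back
// to the default section when the key is missing from the current one:
//
//	cfg, _ := ini.Load([]byte("base = /opt\n[paths]\nbin = %(base)s/bin"))
//	cfg.Section("paths").Key("bin").String() // "/opt/bin"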
diff --git a/vendor/github.com/go-ini/ini/parser.go b/vendor/github.com/go-ini/ini/parser.go
new file mode 100644
index 0000000..b0aabe3
--- /dev/null
+++ b/vendor/github.com/go-ini/ini/parser.go
@@ -0,0 +1,356 @@
1// Copyright 2015 Unknwon
2//
3// Licensed under the Apache License, Version 2.0 (the "License"): you may
4// not use this file except in compliance with the License. You may obtain
5// a copy of the License at
6//
7// http://www.apache.org/licenses/LICENSE-2.0
8//
9// Unless required by applicable law or agreed to in writing, software
10// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
11// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
12// License for the specific language governing permissions and limitations
13// under the License.
14
15package ini
16
17import (
18 "bufio"
19 "bytes"
20 "fmt"
21 "io"
22 "strconv"
23 "strings"
24 "unicode"
25)
26
27type tokenType int
28
29const (
30 _TOKEN_INVALID tokenType = iota
31 _TOKEN_COMMENT
32 _TOKEN_SECTION
33 _TOKEN_KEY
34)
35
36type parser struct {
37 buf *bufio.Reader
38 isEOF bool
39 count int
40 comment *bytes.Buffer
41}
42
43func newParser(r io.Reader) *parser {
44 return &parser{
45 buf: bufio.NewReader(r),
46 count: 1,
47 comment: &bytes.Buffer{},
48 }
49}
50
51// BOM handles the byte order mark at the beginning of UTF-8, UTF-16 LE and UTF-16 BE input.
52// http://en.wikipedia.org/wiki/Byte_order_mark#Representations_of_byte_order_marks_by_encoding
53func (p *parser) BOM() error {
54 mask, err := p.buf.Peek(2)
55 if err != nil && err != io.EOF {
56 return err
57 } else if len(mask) < 2 {
58 return nil
59 }
60
61 switch {
62 case mask[0] == 254 && mask[1] == 255:
63 fallthrough
64 case mask[0] == 255 && mask[1] == 254:
65 p.buf.Read(mask)
66 case mask[0] == 239 && mask[1] == 187:
67 mask, err := p.buf.Peek(3)
68 if err != nil && err != io.EOF {
69 return err
70 } else if len(mask) < 3 {
71 return nil
72 }
73 if mask[2] == 191 {
74 p.buf.Read(mask)
75 }
76 }
77 return nil
78}
79
80func (p *parser) readUntil(delim byte) ([]byte, error) {
81 data, err := p.buf.ReadBytes(delim)
82 if err != nil {
83 if err == io.EOF {
84 p.isEOF = true
85 } else {
86 return nil, err
87 }
88 }
89 return data, nil
90}
91
92func cleanComment(in []byte) ([]byte, bool) {
93 i := bytes.IndexAny(in, "#;")
94 if i == -1 {
95 return nil, false
96 }
97 return in[i:], true
98}
99
100func readKeyName(in []byte) (string, int, error) {
101 line := string(in)
102
103 // Check if the key name is surrounded by quotes.
104 var keyQuote string
105 if line[0] == '"' {
106 if len(line) > 6 && string(line[0:3]) == `"""` {
107 keyQuote = `"""`
108 } else {
109 keyQuote = `"`
110 }
111 } else if line[0] == '`' {
112 keyQuote = "`"
113 }
114
115 // Extract the key name
116 endIdx := -1
117 if len(keyQuote) > 0 {
118 startIdx := len(keyQuote)
119 // FIXME: fail case -> """"""name"""=value
120 pos := strings.Index(line[startIdx:], keyQuote)
121 if pos == -1 {
122 return "", -1, fmt.Errorf("missing closing key quote: %s", line)
123 }
124 pos += startIdx
125
126 // Find key-value delimiter
127 i := strings.IndexAny(line[pos+startIdx:], "=:")
128 if i < 0 {
129 return "", -1, ErrDelimiterNotFound{line}
130 }
131 endIdx = pos + i
132 return strings.TrimSpace(line[startIdx:pos]), endIdx + startIdx + 1, nil
133 }
134
135 endIdx = strings.IndexAny(line, "=:")
136 if endIdx < 0 {
137 return "", -1, ErrDelimiterNotFound{line}
138 }
139 return strings.TrimSpace(line[0:endIdx]), endIdx + 1, nil
140}
141
142func (p *parser) readMultilines(line, val, valQuote string) (string, error) {
143 for {
144 data, err := p.readUntil('\n')
145 if err != nil {
146 return "", err
147 }
148 next := string(data)
149
150 pos := strings.LastIndex(next, valQuote)
151 if pos > -1 {
152 val += next[:pos]
153
154 comment, has := cleanComment([]byte(next[pos:]))
155 if has {
156 p.comment.Write(bytes.TrimSpace(comment))
157 }
158 break
159 }
160 val += next
161 if p.isEOF {
162 return "", fmt.Errorf("missing closing key quote from '%s' to '%s'", line, next)
163 }
164 }
165 return val, nil
166}
167
168func (p *parser) readContinuationLines(val string) (string, error) {
169 for {
170 data, err := p.readUntil('\n')
171 if err != nil {
172 return "", err
173 }
174 next := strings.TrimSpace(string(data))
175
176 if len(next) == 0 {
177 break
178 }
179 val += next
180 if val[len(val)-1] != '\\' {
181 break
182 }
183 val = val[:len(val)-1]
184 }
185 return val, nil
186}
187
188// hasSurroundedQuote reports whether the first and last characters
189// are the given quote character (" or ').
190// It returns false if any other part of the string contains the same kind of quote.
191func hasSurroundedQuote(in string, quote byte) bool {
192 return len(in) > 2 && in[0] == quote && in[len(in)-1] == quote &&
193 strings.IndexByte(in[1:], quote) == len(in)-2
194}
195
196func (p *parser) readValue(in []byte, ignoreContinuation bool) (string, error) {
197 line := strings.TrimLeftFunc(string(in), unicode.IsSpace)
198 if len(line) == 0 {
199 return "", nil
200 }
201
202 var valQuote string
203 if len(line) > 3 && string(line[0:3]) == `"""` {
204 valQuote = `"""`
205 } else if line[0] == '`' {
206 valQuote = "`"
207 }
208
209 if len(valQuote) > 0 {
210 startIdx := len(valQuote)
211 pos := strings.LastIndex(line[startIdx:], valQuote)
212 // Check for multi-line value
213 if pos == -1 {
214 return p.readMultilines(line, line[startIdx:], valQuote)
215 }
216
217 return line[startIdx : pos+startIdx], nil
218 }
219
220 // Won't be able to reach here if value only contains whitespace.
221 line = strings.TrimSpace(line)
222
223 // Check continuation lines when desired.
224 if !ignoreContinuation && line[len(line)-1] == '\\' {
225 return p.readContinuationLines(line[:len(line)-1])
226 }
227
228 i := strings.IndexAny(line, "#;")
229 if i > -1 {
230 p.comment.WriteString(line[i:])
231 line = strings.TrimSpace(line[:i])
232 }
233
234 // Trim single quotes
235 if hasSurroundedQuote(line, '\'') ||
236 hasSurroundedQuote(line, '"') {
237 line = line[1 : len(line)-1]
238 }
239 return line, nil
240}
241
242// parse parses data through an io.Reader.
243func (f *File) parse(reader io.Reader) (err error) {
244 p := newParser(reader)
245 if err = p.BOM(); err != nil {
246 return fmt.Errorf("BOM: %v", err)
247 }
248
249 // Ignore error because default section name is never empty string.
250 section, _ := f.NewSection(DEFAULT_SECTION)
251
252 var line []byte
253 var inUnparseableSection bool
254 for !p.isEOF {
255 line, err = p.readUntil('\n')
256 if err != nil {
257 return err
258 }
259
260 line = bytes.TrimLeftFunc(line, unicode.IsSpace)
261 if len(line) == 0 {
262 continue
263 }
264
265 // Comments
266 if line[0] == '#' || line[0] == ';' {
267 // Note: we keep the trailing line break here because
268 // it is needed when appending a second comment line;
269 // it is cleaned once at the end when assigned to the value.
270 p.comment.Write(line)
271 continue
272 }
273
274 // Section
275 if line[0] == '[' {
276 // Read to the next ']' (TODO: support quoted strings)
277 // TODO(unknwon): use LastIndexByte when stop supporting Go1.4
278 closeIdx := bytes.LastIndex(line, []byte("]"))
279 if closeIdx == -1 {
280 return fmt.Errorf("unclosed section: %s", line)
281 }
282
283 name := string(line[1:closeIdx])
284 section, err = f.NewSection(name)
285 if err != nil {
286 return err
287 }
288
289 comment, has := cleanComment(line[closeIdx+1:])
290 if has {
291 p.comment.Write(comment)
292 }
293
294 section.Comment = strings.TrimSpace(p.comment.String())
295
296 // Reset auto-counter and comments
297 p.comment.Reset()
298 p.count = 1
299
300 inUnparseableSection = false
301 for i := range f.options.UnparseableSections {
302 if f.options.UnparseableSections[i] == name ||
303 (f.options.Insensitive && strings.ToLower(f.options.UnparseableSections[i]) == strings.ToLower(name)) {
304 inUnparseableSection = true
305 continue
306 }
307 }
308 continue
309 }
310
311 if inUnparseableSection {
312 section.isRawSection = true
313 section.rawBody += string(line)
314 continue
315 }
316
317 kname, offset, err := readKeyName(line)
318 if err != nil {
319 // Treat as a boolean key when enabled; the whole line is the key name.
320 if IsErrDelimiterNotFound(err) && f.options.AllowBooleanKeys {
321 key, err := section.NewKey(string(line), "true")
322 if err != nil {
323 return err
324 }
325 key.isBooleanType = true
326 key.Comment = strings.TrimSpace(p.comment.String())
327 p.comment.Reset()
328 continue
329 }
330 return err
331 }
332
333 // Auto increment.
334 isAutoIncr := false
335 if kname == "-" {
336 isAutoIncr = true
337 kname = "#" + strconv.Itoa(p.count)
338 p.count++
339 }
340
341 key, err := section.NewKey(kname, "")
342 if err != nil {
343 return err
344 }
345 key.isAutoIncrement = isAutoIncr
346
347 value, err := p.readValue(line[offset:], f.options.IgnoreContinuation)
348 if err != nil {
349 return err
350 }
351 key.SetValue(value)
352 key.Comment = strings.TrimSpace(p.comment.String())
353 p.comment.Reset()
354 }
355 return nil
356}
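// Illustrative sketch (not part of the vendored source; assumes the
// LoadSources entry point and LoadOptions type defined in this package's
// ini.go) of the parser options referenced above:
//
//	cfg, err := ini.LoadSources(ini.LoadOptions{
//		AllowBooleanKeys:    true,                 // a bare "skip-ssl" line becomes "true"
//		UnparseableSections: []string{"COMMENTS"}, // section body kept as raw text
//	}, []byte("skip-ssl\n[COMMENTS]\nfree text, not key=value\n"))
//	if err == nil {
//		_ = cfg.Section("").Key("skip-ssl").MustBool(false) // true
//		_ = cfg.Section("COMMENTS").Body()                  // "free text, not key=value"
//	}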
diff --git a/vendor/github.com/go-ini/ini/section.go b/vendor/github.com/go-ini/ini/section.go
new file mode 100644
index 0000000..45d2f3b
--- /dev/null
+++ b/vendor/github.com/go-ini/ini/section.go
@@ -0,0 +1,221 @@
1// Copyright 2014 Unknwon
2//
3// Licensed under the Apache License, Version 2.0 (the "License"): you may
4// not use this file except in compliance with the License. You may obtain
5// a copy of the License at
6//
7// http://www.apache.org/licenses/LICENSE-2.0
8//
9// Unless required by applicable law or agreed to in writing, software
10// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
11// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
12// License for the specific language governing permissions and limitations
13// under the License.
14
15package ini
16
17import (
18 "errors"
19 "fmt"
20 "strings"
21)
22
23// Section represents a config section.
24type Section struct {
25 f *File
26 Comment string
27 name string
28 keys map[string]*Key
29 keyList []string
30 keysHash map[string]string
31
32 isRawSection bool
33 rawBody string
34}
35
36func newSection(f *File, name string) *Section {
37 return &Section{
38 f: f,
39 name: name,
40 keys: make(map[string]*Key),
41 keyList: make([]string, 0, 10),
42 keysHash: make(map[string]string),
43 }
44}
45
46// Name returns name of Section.
47func (s *Section) Name() string {
48 return s.name
49}
50
51// Body returns the rawBody of the Section if the section was marked as unparseable.
52// It still follows the INI format's rules for leading/trailing whitespace.
53func (s *Section) Body() string {
54 return strings.TrimSpace(s.rawBody)
55}
56
57// NewKey creates a new key in the given section.
58func (s *Section) NewKey(name, val string) (*Key, error) {
59 if len(name) == 0 {
60 return nil, errors.New("error creating new key: empty key name")
61 } else if s.f.options.Insensitive {
62 name = strings.ToLower(name)
63 }
64
65 if s.f.BlockMode {
66 s.f.lock.Lock()
67 defer s.f.lock.Unlock()
68 }
69
70 if inSlice(name, s.keyList) {
71 s.keys[name].value = val
72 return s.keys[name], nil
73 }
74
75 s.keyList = append(s.keyList, name)
76 s.keys[name] = &Key{
77 s: s,
78 name: name,
79 value: val,
80 }
81 s.keysHash[name] = val
82 return s.keys[name], nil
83}
84
85// GetKey returns key in section by given name.
86func (s *Section) GetKey(name string) (*Key, error) {
87 // FIXME: change to section level lock?
88 if s.f.BlockMode {
89 s.f.lock.RLock()
90 }
91 if s.f.options.Insensitive {
92 name = strings.ToLower(name)
93 }
94 key := s.keys[name]
95 if s.f.BlockMode {
96 s.f.lock.RUnlock()
97 }
98
99 if key == nil {
100 // Check if it is a child-section.
101 sname := s.name
102 for {
103 if i := strings.LastIndex(sname, "."); i > -1 {
104 sname = sname[:i]
105 sec, err := s.f.GetSection(sname)
106 if err != nil {
107 continue
108 }
109 return sec.GetKey(name)
110 } else {
111 break
112 }
113 }
114 return nil, fmt.Errorf("error when getting key of section '%s': key '%s' does not exist", s.name, name)
115 }
116 return key, nil
117}
118
119// HasKey returns true if section contains a key with given name.
120func (s *Section) HasKey(name string) bool {
121 key, _ := s.GetKey(name)
122 return key != nil
123}
124
125// Haskey is a backwards-compatible name for HasKey.
126func (s *Section) Haskey(name string) bool {
127 return s.HasKey(name)
128}
129
130// HasValue returns true if section contains given raw value.
131func (s *Section) HasValue(value string) bool {
132 if s.f.BlockMode {
133 s.f.lock.RLock()
134 defer s.f.lock.RUnlock()
135 }
136
137 for _, k := range s.keys {
138 if value == k.value {
139 return true
140 }
141 }
142 return false
143}
144
145// Key assumes the named Key exists in the section and returns an empty Key when it does not.
146func (s *Section) Key(name string) *Key {
147 key, err := s.GetKey(name)
148 if err != nil {
149 // It's OK here because the only possible error is empty key name,
150 // but if it's empty, this piece of code won't be executed.
151 key, _ = s.NewKey(name, "")
152 return key
153 }
154 return key
155}
156
157// Keys returns list of keys of section.
158func (s *Section) Keys() []*Key {
159 keys := make([]*Key, len(s.keyList))
160 for i := range s.keyList {
161 keys[i] = s.Key(s.keyList[i])
162 }
163 return keys
164}
165
166// ParentKeys returns the list of keys of all parent sections.
167func (s *Section) ParentKeys() []*Key {
168 var parentKeys []*Key
169 sname := s.name
170 for {
171 if i := strings.LastIndex(sname, "."); i > -1 {
172 sname = sname[:i]
173 sec, err := s.f.GetSection(sname)
174 if err != nil {
175 continue
176 }
177 parentKeys = append(parentKeys, sec.Keys()...)
178 } else {
179 break
180 }
181
182 }
183 return parentKeys
184}
185
186// KeyStrings returns list of key names of section.
187func (s *Section) KeyStrings() []string {
188 list := make([]string, len(s.keyList))
189 copy(list, s.keyList)
190 return list
191}
192
193// KeysHash returns keys hash consisting of names and values.
194func (s *Section) KeysHash() map[string]string {
195 if s.f.BlockMode {
196 s.f.lock.RLock()
197 defer s.f.lock.RUnlock()
198 }
199
200 hash := map[string]string{}
201 for key, value := range s.keysHash {
202 hash[key] = value
203 }
204 return hash
205}
206
207// DeleteKey deletes a key from section.
208func (s *Section) DeleteKey(name string) {
209 if s.f.BlockMode {
210 s.f.lock.Lock()
211 defer s.f.lock.Unlock()
212 }
213
214 for i, k := range s.keyList {
215 if k == name {
216 s.keyList = append(s.keyList[:i], s.keyList[i+1:]...)
217 delete(s.keys, name)
218 return
219 }
220 }
221}
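// Illustrative sketch (not part of the vendored source) of the parent-section
// fallback implemented in GetKey above: a dotted child section inherits keys
// from its parents.
//
//	cfg, _ := ini.Load([]byte("[server]\nhost = example.com\n[server.http]\nport = 80"))
//	cfg.Section("server.http").Key("port").String() // "80"
//	cfg.Section("server.http").Key("host").String() // "example.com", from [server]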
diff --git a/vendor/github.com/go-ini/ini/struct.go b/vendor/github.com/go-ini/ini/struct.go
new file mode 100644
index 0000000..5ef38d8
--- /dev/null
+++ b/vendor/github.com/go-ini/ini/struct.go
@@ -0,0 +1,431 @@
1// Copyright 2014 Unknwon
2//
3// Licensed under the Apache License, Version 2.0 (the "License"): you may
4// not use this file except in compliance with the License. You may obtain
5// a copy of the License at
6//
7// http://www.apache.org/licenses/LICENSE-2.0
8//
9// Unless required by applicable law or agreed to in writing, software
10// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
11// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
12// License for the specific language governing permissions and limitations
13// under the License.
14
15package ini
16
17import (
18 "bytes"
19 "errors"
20 "fmt"
21 "reflect"
22 "strings"
23 "time"
24 "unicode"
25)
26
27// NameMapper represents an ini tag name mapper.
28type NameMapper func(string) string
29
30// Built-in name getters.
31var (
32 // AllCapsUnderscore converts to format ALL_CAPS_UNDERSCORE.
33 AllCapsUnderscore NameMapper = func(raw string) string {
34 newstr := make([]rune, 0, len(raw))
35 for i, chr := range raw {
36 if isUpper := 'A' <= chr && chr <= 'Z'; isUpper {
37 if i > 0 {
38 newstr = append(newstr, '_')
39 }
40 }
41 newstr = append(newstr, unicode.ToUpper(chr))
42 }
43 return string(newstr)
44 }
45 // TitleUnderscore converts to format title_underscore.
46 TitleUnderscore NameMapper = func(raw string) string {
47 newstr := make([]rune, 0, len(raw))
48 for i, chr := range raw {
49 if isUpper := 'A' <= chr && chr <= 'Z'; isUpper {
50 if i > 0 {
51 newstr = append(newstr, '_')
52 }
53 chr -= ('A' - 'a')
54 }
55 newstr = append(newstr, chr)
56 }
57 return string(newstr)
58 }
59)
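// Illustrative sketch (not part of the vendored source) of the built-in
// mappers above, which translate struct field names into key names:
//
//	AllCapsUnderscore("MaxOpenConns") // "MAX_OPEN_CONNS"
//	TitleUnderscore("MaxOpenConns")   // "max_open_conns"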
60
61func (s *Section) parseFieldName(raw, actual string) string {
62 if len(actual) > 0 {
63 return actual
64 }
65 if s.f.NameMapper != nil {
66 return s.f.NameMapper(raw)
67 }
68 return raw
69}
70
71func parseDelim(actual string) string {
72 if len(actual) > 0 {
73 return actual
74 }
75 return ","
76}
77
78var reflectTime = reflect.TypeOf(time.Now()).Kind()
79
80// setSliceWithProperType sets proper values to slice based on its type.
81func setSliceWithProperType(key *Key, field reflect.Value, delim string) error {
82 strs := key.Strings(delim)
83 numVals := len(strs)
84 if numVals == 0 {
85 return nil
86 }
87
88 var vals interface{}
89
90 sliceOf := field.Type().Elem().Kind()
91 switch sliceOf {
92 case reflect.String:
93 vals = strs
94 case reflect.Int:
95 vals = key.Ints(delim)
96 case reflect.Int64:
97 vals = key.Int64s(delim)
98 case reflect.Uint:
99 vals = key.Uints(delim)
100 case reflect.Uint64:
101 vals = key.Uint64s(delim)
102 case reflect.Float64:
103 vals = key.Float64s(delim)
104 case reflectTime:
105 vals = key.Times(delim)
106 default:
107 return fmt.Errorf("unsupported type '[]%s'", sliceOf)
108 }
109
110 slice := reflect.MakeSlice(field.Type(), numVals, numVals)
111 for i := 0; i < numVals; i++ {
112 switch sliceOf {
113 case reflect.String:
114 slice.Index(i).Set(reflect.ValueOf(vals.([]string)[i]))
115 case reflect.Int:
116 slice.Index(i).Set(reflect.ValueOf(vals.([]int)[i]))
117 case reflect.Int64:
118 slice.Index(i).Set(reflect.ValueOf(vals.([]int64)[i]))
119 case reflect.Uint:
120 slice.Index(i).Set(reflect.ValueOf(vals.([]uint)[i]))
121 case reflect.Uint64:
122 slice.Index(i).Set(reflect.ValueOf(vals.([]uint64)[i]))
123 case reflect.Float64:
124 slice.Index(i).Set(reflect.ValueOf(vals.([]float64)[i]))
125 case reflectTime:
126 slice.Index(i).Set(reflect.ValueOf(vals.([]time.Time)[i]))
127 }
128 }
129 field.Set(slice)
130 return nil
131}
132
133// setWithProperType sets a proper value to the field based on its type,
134// but it does not return an error when parsing fails,
135// because we want to use the default value that is already assigned to the struct.
136func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string) error {
137 switch t.Kind() {
138 case reflect.String:
139 if len(key.String()) == 0 {
140 return nil
141 }
142 field.SetString(key.String())
143 case reflect.Bool:
144 boolVal, err := key.Bool()
145 if err != nil {
146 return nil
147 }
148 field.SetBool(boolVal)
149 case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
150 durationVal, err := key.Duration()
151 // Skip zero value
152 if err == nil && int(durationVal) > 0 {
153 field.Set(reflect.ValueOf(durationVal))
154 return nil
155 }
156
157 intVal, err := key.Int64()
158 if err != nil || intVal == 0 {
159 return nil
160 }
161 field.SetInt(intVal)
162 // byte is an alias for uint8, so supporting uint8 breaks support for byte
163 case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64:
164 durationVal, err := key.Duration()
165 // Skip zero value
166 if err == nil && int(durationVal) > 0 {
167 field.Set(reflect.ValueOf(durationVal))
168 return nil
169 }
170
171 uintVal, err := key.Uint64()
172 if err != nil {
173 return nil
174 }
175 field.SetUint(uintVal)
176
177 case reflect.Float32, reflect.Float64:
178 floatVal, err := key.Float64()
179 if err != nil {
180 return nil
181 }
182 field.SetFloat(floatVal)
183 case reflectTime:
184 timeVal, err := key.Time()
185 if err != nil {
186 return nil
187 }
188 field.Set(reflect.ValueOf(timeVal))
189 case reflect.Slice:
190 return setSliceWithProperType(key, field, delim)
191 default:
192 return fmt.Errorf("unsupported type '%s'", t)
193 }
194 return nil
195}
196
197func (s *Section) mapTo(val reflect.Value) error {
198 if val.Kind() == reflect.Ptr {
199 val = val.Elem()
200 }
201 typ := val.Type()
202
203 for i := 0; i < typ.NumField(); i++ {
204 field := val.Field(i)
205 tpField := typ.Field(i)
206
207 tag := tpField.Tag.Get("ini")
208 if tag == "-" {
209 continue
210 }
211
212 opts := strings.SplitN(tag, ",", 2) // strip off possible omitempty
213 fieldName := s.parseFieldName(tpField.Name, opts[0])
214 if len(fieldName) == 0 || !field.CanSet() {
215 continue
216 }
217
218 isAnonymous := tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous
219 isStruct := tpField.Type.Kind() == reflect.Struct
220 if isAnonymous {
221 field.Set(reflect.New(tpField.Type.Elem()))
222 }
223
224 if isAnonymous || isStruct {
225 if sec, err := s.f.GetSection(fieldName); err == nil {
226 if err = sec.mapTo(field); err != nil {
227 return fmt.Errorf("error mapping field(%s): %v", fieldName, err)
228 }
229 continue
230 }
231 }
232
233 if key, err := s.GetKey(fieldName); err == nil {
234 if err = setWithProperType(tpField.Type, key, field, parseDelim(tpField.Tag.Get("delim"))); err != nil {
235 return fmt.Errorf("error mapping field(%s): %v", fieldName, err)
236 }
237 }
238 }
239 return nil
240}
241
242// MapTo maps section to given struct.
243func (s *Section) MapTo(v interface{}) error {
244 typ := reflect.TypeOf(v)
245 val := reflect.ValueOf(v)
246 if typ.Kind() == reflect.Ptr {
247 typ = typ.Elem()
248 val = val.Elem()
249 } else {
250 return errors.New("cannot map to non-pointer struct")
251 }
252
253 return s.mapTo(val)
254}
255
256// MapTo maps file to given struct.
257func (f *File) MapTo(v interface{}) error {
258 return f.Section("").MapTo(v)
259}
260
261// MapToWithMapper maps data sources to the given struct with a name mapper.
262func MapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error {
263 cfg, err := Load(source, others...)
264 if err != nil {
265 return err
266 }
267 cfg.NameMapper = mapper
268 return cfg.MapTo(v)
269}
270
271// MapTo maps data sources to given struct.
272func MapTo(v, source interface{}, others ...interface{}) error {
273 return MapToWithMapper(v, nil, source, others...)
274}
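// Illustrative sketch (not part of the vendored source) of MapTo with the
// "ini" and "delim" struct tags handled above; Config is a hypothetical type:
//
//	type Config struct {
//		Name    string `ini:"name"`
//		Ports   []int  `ini:"ports" delim:","`
//		Ignored string `ini:"-"`
//	}
//
//	var c Config
//	err := ini.MapTo(&c, []byte("name = app\nports = 80,443"))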
275
276// reflectSliceWithProperType does the opposite of setSliceWithProperType.
277func reflectSliceWithProperType(key *Key, field reflect.Value, delim string) error {
278 slice := field.Slice(0, field.Len())
279 if field.Len() == 0 {
280 return nil
281 }
282
283 var buf bytes.Buffer
284 sliceOf := field.Type().Elem().Kind()
285 for i := 0; i < field.Len(); i++ {
286 switch sliceOf {
287 case reflect.String:
288 buf.WriteString(slice.Index(i).String())
289 case reflect.Int, reflect.Int64:
290 buf.WriteString(fmt.Sprint(slice.Index(i).Int()))
291 case reflect.Uint, reflect.Uint64:
292 buf.WriteString(fmt.Sprint(slice.Index(i).Uint()))
293 case reflect.Float64:
294 buf.WriteString(fmt.Sprint(slice.Index(i).Float()))
295 case reflectTime:
296 buf.WriteString(slice.Index(i).Interface().(time.Time).Format(time.RFC3339))
297 default:
298 return fmt.Errorf("unsupported type '[]%s'", sliceOf)
299 }
300 buf.WriteString(delim)
301 }
302 key.SetValue(buf.String()[:buf.Len()-1])
303 return nil
304}
305
306// reflectWithProperType does the opposite of setWithProperType.
307func reflectWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string) error {
308 switch t.Kind() {
309 case reflect.String:
310 key.SetValue(field.String())
311 case reflect.Bool:
312 key.SetValue(fmt.Sprint(field.Bool()))
313 case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
314 key.SetValue(fmt.Sprint(field.Int()))
315 case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
316 key.SetValue(fmt.Sprint(field.Uint()))
317 case reflect.Float32, reflect.Float64:
318 key.SetValue(fmt.Sprint(field.Float()))
319 case reflectTime:
320 key.SetValue(fmt.Sprint(field.Interface().(time.Time).Format(time.RFC3339)))
321 case reflect.Slice:
322 return reflectSliceWithProperType(key, field, delim)
323 default:
324 return fmt.Errorf("unsupported type '%s'", t)
325 }
326 return nil
327}
328
329// CR: copied from encoding/json/encode.go, with modifications for time.Time support.
330// TODO: add more test coverage.
331func isEmptyValue(v reflect.Value) bool {
332 switch v.Kind() {
333 case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
334 return v.Len() == 0
335 case reflect.Bool:
336 return !v.Bool()
337 case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
338 return v.Int() == 0
339 case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
340 return v.Uint() == 0
341 case reflect.Float32, reflect.Float64:
342 return v.Float() == 0
343 case reflectTime:
344 return v.Interface().(time.Time).IsZero()
345 case reflect.Interface, reflect.Ptr:
346 return v.IsNil()
347 }
348 return false
349}
350
351func (s *Section) reflectFrom(val reflect.Value) error {
352 if val.Kind() == reflect.Ptr {
353 val = val.Elem()
354 }
355 typ := val.Type()
356
357 for i := 0; i < typ.NumField(); i++ {
358 field := val.Field(i)
359 tpField := typ.Field(i)
360
361 tag := tpField.Tag.Get("ini")
362 if tag == "-" {
363 continue
364 }
365
366 opts := strings.SplitN(tag, ",", 2)
367 if len(opts) == 2 && opts[1] == "omitempty" && isEmptyValue(field) {
368 continue
369 }
370
371 fieldName := s.parseFieldName(tpField.Name, opts[0])
372 if len(fieldName) == 0 || !field.CanSet() {
373 continue
374 }
375
376 if (tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous) ||
377 (tpField.Type.Kind() == reflect.Struct && tpField.Type.Name() != "Time") {
378 // Note: The only possible error here is that the section doesn't exist.
379 sec, err := s.f.GetSection(fieldName)
380 if err != nil {
381 // Note: fieldName can never be empty here, ignore error.
382 sec, _ = s.f.NewSection(fieldName)
383 }
384 if err = sec.reflectFrom(field); err != nil {
385 return fmt.Errorf("error reflecting field (%s): %v", fieldName, err)
386 }
387 continue
388 }
389
390 // Note: Same reason as for the section above.
391 key, err := s.GetKey(fieldName)
392 if err != nil {
393 key, _ = s.NewKey(fieldName, "")
394 }
395 if err = reflectWithProperType(tpField.Type, key, field, parseDelim(tpField.Tag.Get("delim"))); err != nil {
396 return fmt.Errorf("error reflecting field (%s): %v", fieldName, err)
397 }
398
399 }
400 return nil
401}
402
403// ReflectFrom reflects a section from the given struct.
404func (s *Section) ReflectFrom(v interface{}) error {
405 typ := reflect.TypeOf(v)
406 val := reflect.ValueOf(v)
407 if typ.Kind() == reflect.Ptr {
408 typ = typ.Elem()
409 val = val.Elem()
410 } else {
411 return errors.New("cannot reflect from non-pointer struct")
412 }
413
414 return s.reflectFrom(val)
415}
416
417// ReflectFrom reflects file from given struct.
418func (f *File) ReflectFrom(v interface{}) error {
419 return f.Section("").ReflectFrom(v)
420}
421
422// ReflectFromWithMapper reflects data sources from the given struct with a name mapper.
423func ReflectFromWithMapper(cfg *File, v interface{}, mapper NameMapper) error {
424 cfg.NameMapper = mapper
425 return cfg.ReflectFrom(v)
426}
427
428// ReflectFrom reflects data sources from given struct.
429func ReflectFrom(cfg *File, v interface{}) error {
430 return ReflectFromWithMapper(cfg, v, nil)
431}
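// Illustrative sketch (not part of the vendored source; assumes ini.Empty
// from this package's ini.go) of the reverse direction, serializing a struct
// into a File via ReflectFrom; omitempty skips zero-valued fields:
//
//	type Config struct {
//		Name  string    `ini:"name"`
//		Born  time.Time `ini:"born"`
//		Notes string    `ini:"notes,omitempty"` // skipped while empty
//	}
//
//	cfg := ini.Empty()
//	err := ini.ReflectFrom(cfg, &Config{Name: "app", Born: time.Now()})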
diff --git a/vendor/github.com/hashicorp/errwrap/LICENSE b/vendor/github.com/hashicorp/errwrap/LICENSE
new file mode 100644
index 0000000..c33dcc7
--- /dev/null
+++ b/vendor/github.com/hashicorp/errwrap/LICENSE
@@ -0,0 +1,354 @@
1Mozilla Public License, version 2.0
2
31. Definitions
4
51.1. “Contributor”
6
7 means each individual or legal entity that creates, contributes to the
8 creation of, or owns Covered Software.
9
101.2. “Contributor Version”
11
12 means the combination of the Contributions of others (if any) used by a
13 Contributor and that particular Contributor’s Contribution.
14
151.3. “Contribution”
16
17 means Covered Software of a particular Contributor.
18
191.4. “Covered Software”
20
21 means Source Code Form to which the initial Contributor has attached the
22 notice in Exhibit A, the Executable Form of such Source Code Form, and
23 Modifications of such Source Code Form, in each case including portions
24 thereof.
25
261.5. “Incompatible With Secondary Licenses”
27 means
28
29 a. that the initial Contributor has attached the notice described in
30 Exhibit B to the Covered Software; or
31
32 b. that the Covered Software was made available under the terms of version
33 1.1 or earlier of the License, but not also under the terms of a
34 Secondary License.
35
361.6. “Executable Form”
37
38 means any form of the work other than Source Code Form.
39
401.7. “Larger Work”
41
42 means a work that combines Covered Software with other material, in a separate
43 file or files, that is not Covered Software.
44
451.8. “License”
46
47 means this document.
48
491.9. “Licensable”
50
51 means having the right to grant, to the maximum extent possible, whether at the
52 time of the initial grant or subsequently, any and all of the rights conveyed by
53 this License.
54
551.10. “Modifications”
56
57 means any of the following:
58
59 a. any file in Source Code Form that results from an addition to, deletion
60 from, or modification of the contents of Covered Software; or
61
62 b. any new file in Source Code Form that contains any Covered Software.
63
641.11. “Patent Claims” of a Contributor
65
66 means any patent claim(s), including without limitation, method, process,
67 and apparatus claims, in any patent Licensable by such Contributor that
68 would be infringed, but for the grant of the License, by the making,
69 using, selling, offering for sale, having made, import, or transfer of
70 either its Contributions or its Contributor Version.
71
721.12. “Secondary License”
73
74 means either the GNU General Public License, Version 2.0, the GNU Lesser
75 General Public License, Version 2.1, the GNU Affero General Public
76 License, Version 3.0, or any later versions of those licenses.
77
781.13. “Source Code Form”
79
80 means the form of the work preferred for making modifications.
81
821.14. “You” (or “Your”)
83
84 means an individual or a legal entity exercising rights under this
85 License. For legal entities, “You” includes any entity that controls, is
86 controlled by, or is under common control with You. For purposes of this
87 definition, “control” means (a) the power, direct or indirect, to cause
88 the direction or management of such entity, whether by contract or
89 otherwise, or (b) ownership of more than fifty percent (50%) of the
90 outstanding shares or beneficial ownership of such entity.
91
92
932. License Grants and Conditions
94
952.1. Grants
96
97 Each Contributor hereby grants You a world-wide, royalty-free,
98 non-exclusive license:
99
100 a. under intellectual property rights (other than patent or trademark)
101 Licensable by such Contributor to use, reproduce, make available,
102 modify, display, perform, distribute, and otherwise exploit its
103 Contributions, either on an unmodified basis, with Modifications, or as
104 part of a Larger Work; and
105
106 b. under Patent Claims of such Contributor to make, use, sell, offer for
107 sale, have made, import, and otherwise transfer either its Contributions
108 or its Contributor Version.
109
1102.2. Effective Date
111
112 The licenses granted in Section 2.1 with respect to any Contribution become
113 effective for each Contribution on the date the Contributor first distributes
114 such Contribution.
115
1162.3. Limitations on Grant Scope
117
118 The licenses granted in this Section 2 are the only rights granted under this
119 License. No additional rights or licenses will be implied from the distribution
120 or licensing of Covered Software under this License. Notwithstanding Section
121 2.1(b) above, no patent license is granted by a Contributor:
122
123 a. for any code that a Contributor has removed from Covered Software; or
124
125 b. for infringements caused by: (i) Your and any other third party’s
126 modifications of Covered Software, or (ii) the combination of its
127 Contributions with other software (except as part of its Contributor
128 Version); or
129
130 c. under Patent Claims infringed by Covered Software in the absence of its
131 Contributions.
132
133 This License does not grant any rights in the trademarks, service marks, or
134 logos of any Contributor (except as may be necessary to comply with the
135 notice requirements in Section 3.4).
136
1372.4. Subsequent Licenses
138
139 No Contributor makes additional grants as a result of Your choice to
140 distribute the Covered Software under a subsequent version of this License
141 (see Section 10.2) or under the terms of a Secondary License (if permitted
142 under the terms of Section 3.3).
143
1442.5. Representation
145
146 Each Contributor represents that the Contributor believes its Contributions
147 are its original creation(s) or it has sufficient rights to grant the
148 rights to its Contributions conveyed by this License.
149
1502.6. Fair Use
151
152 This License is not intended to limit any rights You have under applicable
153 copyright doctrines of fair use, fair dealing, or other equivalents.
154
1552.7. Conditions
156
157 Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
158 Section 2.1.
159
160
1613. Responsibilities
162
1633.1. Distribution of Source Form
164
165 All distribution of Covered Software in Source Code Form, including any
166 Modifications that You create or to which You contribute, must be under the
167 terms of this License. You must inform recipients that the Source Code Form
168 of the Covered Software is governed by the terms of this License, and how
169 they can obtain a copy of this License. You may not attempt to alter or
170 restrict the recipients’ rights in the Source Code Form.
171
1723.2. Distribution of Executable Form
173
174 If You distribute Covered Software in Executable Form then:
175
176 a. such Covered Software must also be made available in Source Code Form,
177 as described in Section 3.1, and You must inform recipients of the
178 Executable Form how they can obtain a copy of such Source Code Form by
179 reasonable means in a timely manner, at a charge no more than the cost
180 of distribution to the recipient; and
181
182 b. You may distribute such Executable Form under the terms of this License,
183 or sublicense it under different terms, provided that the license for
184 the Executable Form does not attempt to limit or alter the recipients’
185 rights in the Source Code Form under this License.
186
1873.3. Distribution of a Larger Work
188
189 You may create and distribute a Larger Work under terms of Your choice,
190 provided that You also comply with the requirements of this License for the
191 Covered Software. If the Larger Work is a combination of Covered Software
192 with a work governed by one or more Secondary Licenses, and the Covered
193 Software is not Incompatible With Secondary Licenses, this License permits
194 You to additionally distribute such Covered Software under the terms of
195 such Secondary License(s), so that the recipient of the Larger Work may, at
196 their option, further distribute the Covered Software under the terms of
197 either this License or such Secondary License(s).
198
1993.4. Notices
200
201 You may not remove or alter the substance of any license notices (including
202 copyright notices, patent notices, disclaimers of warranty, or limitations
203 of liability) contained within the Source Code Form of the Covered
204 Software, except that You may alter any license notices to the extent
205 required to remedy known factual inaccuracies.
206
2073.5. Application of Additional Terms
208
209 You may choose to offer, and to charge a fee for, warranty, support,
210 indemnity or liability obligations to one or more recipients of Covered
211 Software. However, You may do so only on Your own behalf, and not on behalf
212 of any Contributor. You must make it absolutely clear that any such
213 warranty, support, indemnity, or liability obligation is offered by You
214 alone, and You hereby agree to indemnify every Contributor for any
215 liability incurred by such Contributor as a result of warranty, support,
216 indemnity or liability terms You offer. You may include additional
217 disclaimers of warranty and limitations of liability specific to any
218 jurisdiction.
219
2204. Inability to Comply Due to Statute or Regulation
221
222 If it is impossible for You to comply with any of the terms of this License
223 with respect to some or all of the Covered Software due to statute, judicial
224 order, or regulation then You must: (a) comply with the terms of this License
225 to the maximum extent possible; and (b) describe the limitations and the code
226 they affect. Such description must be placed in a text file included with all
227 distributions of the Covered Software under this License. Except to the
228 extent prohibited by statute or regulation, such description must be
229 sufficiently detailed for a recipient of ordinary skill to be able to
230 understand it.
231
2325. Termination
233
2345.1. The rights granted under this License will terminate automatically if You
235 fail to comply with any of its terms. However, if You become compliant,
236 then the rights granted under this License from a particular Contributor
237 are reinstated (a) provisionally, unless and until such Contributor
238 explicitly and finally terminates Your grants, and (b) on an ongoing basis,
239 if such Contributor fails to notify You of the non-compliance by some
240 reasonable means prior to 60 days after You have come back into compliance.
241 Moreover, Your grants from a particular Contributor are reinstated on an
242 ongoing basis if such Contributor notifies You of the non-compliance by
243 some reasonable means, this is the first time You have received notice of
244 non-compliance with this License from such Contributor, and You become
245 compliant prior to 30 days after Your receipt of the notice.
246
2475.2. If You initiate litigation against any entity by asserting a patent
248 infringement claim (excluding declaratory judgment actions, counter-claims,
249 and cross-claims) alleging that a Contributor Version directly or
250 indirectly infringes any patent, then the rights granted to You by any and
251 all Contributors for the Covered Software under Section 2.1 of this License
252 shall terminate.
253
2545.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
255 license agreements (excluding distributors and resellers) which have been
256 validly granted by You or Your distributors under this License prior to
257 termination shall survive termination.
258
2596. Disclaimer of Warranty
260
261 Covered Software is provided under this License on an “as is” basis, without
262 warranty of any kind, either expressed, implied, or statutory, including,
263 without limitation, warranties that the Covered Software is free of defects,
264 merchantable, fit for a particular purpose or non-infringing. The entire
265 risk as to the quality and performance of the Covered Software is with You.
266 Should any Covered Software prove defective in any respect, You (not any
267 Contributor) assume the cost of any necessary servicing, repair, or
268 correction. This disclaimer of warranty constitutes an essential part of this
269 License. No use of any Covered Software is authorized under this License
270 except under this disclaimer.
271
2727. Limitation of Liability
273
274 Under no circumstances and under no legal theory, whether tort (including
275 negligence), contract, or otherwise, shall any Contributor, or anyone who
276 distributes Covered Software as permitted above, be liable to You for any
277 direct, indirect, special, incidental, or consequential damages of any
278 character including, without limitation, damages for lost profits, loss of
279 goodwill, work stoppage, computer failure or malfunction, or any and all
280 other commercial damages or losses, even if such party shall have been
281 informed of the possibility of such damages. This limitation of liability
282 shall not apply to liability for death or personal injury resulting from such
283 party’s negligence to the extent applicable law prohibits such limitation.
284 Some jurisdictions do not allow the exclusion or limitation of incidental or
285 consequential damages, so this exclusion and limitation may not apply to You.
286
2878. Litigation
288
289 Any litigation relating to this License may be brought only in the courts of
290 a jurisdiction where the defendant maintains its principal place of business
291 and such litigation shall be governed by laws of that jurisdiction, without
292 reference to its conflict-of-law provisions. Nothing in this Section shall
293 prevent a party’s ability to bring cross-claims or counter-claims.
294
2959. Miscellaneous
296
297 This License represents the complete agreement concerning the subject matter
298 hereof. If any provision of this License is held to be unenforceable, such
299 provision shall be reformed only to the extent necessary to make it
300 enforceable. Any law or regulation which provides that the language of a
301 contract shall be construed against the drafter shall not be used to construe
302 this License against a Contributor.
303
304
30510. Versions of the License
306
30710.1. New Versions
308
309 Mozilla Foundation is the license steward. Except as provided in Section
310 10.3, no one other than the license steward has the right to modify or
311 publish new versions of this License. Each version will be given a
312 distinguishing version number.
313
31410.2. Effect of New Versions
315
316 You may distribute the Covered Software under the terms of the version of
317 the License under which You originally received the Covered Software, or
318 under the terms of any subsequent version published by the license
319 steward.
320
32110.3. Modified Versions
322
323 If you create software not governed by this License, and you want to
324 create a new license for such software, you may create and use a modified
325 version of this License if you rename the license and remove any
326 references to the name of the license steward (except to note that such
327 modified license differs from this License).
328
32910.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
330 If You choose to distribute Source Code Form that is Incompatible With
331 Secondary Licenses under the terms of this version of the License, the
332 notice described in Exhibit B of this License must be attached.
333
334Exhibit A - Source Code Form License Notice
335
336 This Source Code Form is subject to the
337 terms of the Mozilla Public License, v.
338 2.0. If a copy of the MPL was not
339 distributed with this file, You can
340 obtain one at
341 http://mozilla.org/MPL/2.0/.
342
343If it is not possible or desirable to put the notice in a particular file, then
344You may include the notice in a location (such as a LICENSE file in a relevant
345directory) where a recipient would be likely to look for such a notice.
346
347You may add additional accurate notices of copyright ownership.
348
349Exhibit B - “Incompatible With Secondary Licenses” Notice
350
351 This Source Code Form is “Incompatible
352 With Secondary Licenses”, as defined by
353 the Mozilla Public License, v. 2.0.
354
diff --git a/vendor/github.com/hashicorp/errwrap/README.md b/vendor/github.com/hashicorp/errwrap/README.md
new file mode 100644
index 0000000..1c95f59
--- /dev/null
+++ b/vendor/github.com/hashicorp/errwrap/README.md
@@ -0,0 +1,89 @@
1# errwrap
2
3`errwrap` is a package for Go that formalizes the pattern of wrapping errors
4and checking if an error contains another error.
5
6There is a common pattern in Go of taking a returned `error` value and
7then wrapping it (such as with `fmt.Errorf`) before returning it. The problem
8with this pattern is that you completely lose the original `error` structure.
9
10Arguably the _correct_ approach is that you should make a custom structure
11implementing the `error` interface, and have the original error as a field
12on that structure, such [as this example](http://golang.org/pkg/os/#PathError).
13This is a good approach, but it requires you to know the entire chain of
14possible rewrapping, when you might only care about one error in that chain.
15
16`errwrap` formalizes this pattern (it doesn't matter what approach you use
17above) by giving a single interface for wrapping errors, checking if a specific
18error is wrapped, and extracting that error.
19
20## Installation and Docs
21
22Install using `go get github.com/hashicorp/errwrap`.
23
24Full documentation is available at
25http://godoc.org/github.com/hashicorp/errwrap
26
27## Usage
28
29#### Basic Usage
30
31Below is a very basic example of its usage:
32
33```go
package main

import (
	"fmt"
	"os"

	"github.com/hashicorp/errwrap"
)

34// A function that always returns an error, but wraps it, like a real
35// function might.
36func tryOpen() error {
37 _, err := os.Open("/i/dont/exist")
38 if err != nil {
39 return errwrap.Wrapf("Doesn't exist: {{err}}", err)
40 }
41
42 return nil
43}
44
45func main() {
46 err := tryOpen()
47
48	// We can use the Contains helpers to check if an error contains
49	// another error by its exact message. It is safe to do this with
50	// a nil error, or with an error that doesn't even use errwrap.
51	if errwrap.Contains(err, "open /i/dont/exist: no such file or directory") {
52 // Do something
53 }
54 if errwrap.ContainsType(err, new(os.PathError)) {
55 // Do something
56 }
57
58	// Or we can use the associated `Get` functions to just extract
59	// a specific error. These return nil if that specific error doesn't
60	// exist.
61	fmt.Println(errwrap.GetType(err, new(os.PathError)))
62}
63```
64
65#### Custom Types
66
67If you're already making custom types that properly wrap errors, then
68you can get all the functionality of `errwrap.Contains` and such by
69implementing the `Wrapper` interface with just one function. Example:
70
71```go
72type ErrorCode int

type AppError struct {
73 Code ErrorCode
74 Err error
75}
76
// Error implements the error interface so *AppError can itself be
// passed to the errwrap helpers.
func (e *AppError) Error() string {
	return e.Err.Error()
}

77func (e *AppError) WrappedErrors() []error {
78 return []error{e.Err}
79}
80```
81
82Now this works:
83
84```go
85err := &AppError{Err: fmt.Errorf("an error")}
86if errwrap.ContainsType(err, fmt.Errorf("")) {
87 // This will work!
88}
89```
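
#### Wrapping and Walking

The package also exposes `Wrap`, `Get`, and `Walk` directly (see
`errwrap.go`). A minimal sketch of those entry points; the error messages
here are made up for illustration:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/hashicorp/errwrap"
)

func main() {
	inner := errors.New("connection refused")
	err := errwrap.Wrap(errors.New("failed to dial"), inner)

	// The wrapped error reports the outer message.
	fmt.Println(err) // failed to dial

	// Get returns the deepest error whose message matches exactly.
	fmt.Println(errwrap.Get(err, "connection refused")) // connection refused

	// Walk visits the outer and inner errors in turn.
	errwrap.Walk(err, func(e error) {
		fmt.Println("visited:", e)
	})
}
```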
diff --git a/vendor/github.com/hashicorp/errwrap/errwrap.go b/vendor/github.com/hashicorp/errwrap/errwrap.go
new file mode 100644
index 0000000..a733bef
--- /dev/null
+++ b/vendor/github.com/hashicorp/errwrap/errwrap.go
@@ -0,0 +1,169 @@
1// Package errwrap implements methods to formalize error wrapping in Go.
2//
3// All of the top-level functions that take an `error` are built to be able
4// to take any error, not just wrapped errors. This allows you to use errwrap
5// without having to type-check and type-cast everywhere.
6package errwrap
7
8import (
9 "errors"
10 "reflect"
11 "strings"
12)
13
14// WalkFunc is the callback called for Walk.
15type WalkFunc func(error)
16
17// Wrapper is an interface that can be implemented by custom types to
18// have all the Contains, Get, etc. functions in errwrap work.
19//
20// When Walk reaches a Wrapper, it will call the callback for every
21// wrapped error in addition to the wrapper itself. Since all the top-level
22// functions in errwrap use Walk, this means that all those functions work
23// with your custom type.
24type Wrapper interface {
25 WrappedErrors() []error
26}
27
28// Wrap defines that outer wraps inner, returning an error type that
29// can be cleanly used with the other methods in this package, such as
30// Contains, GetAll, etc.
31//
32// This function won't modify the error message at all (the outer message
33// will be used).
34func Wrap(outer, inner error) error {
35 return &wrappedError{
36 Outer: outer,
37 Inner: inner,
38 }
39}
40
41// Wrapf wraps an error with a formatting message. This is similar to using
42// `fmt.Errorf` to wrap an error. If you're using `fmt.Errorf` to wrap
43// errors, you should replace it with this.
44//
45// format is the format of the error message. The string '{{err}}' will
46// be replaced with the original error message.
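//
// For example, Wrapf("timeout: {{err}}", err) produces an error whose
// message is "timeout: " followed by err's message, while the original
// err stays reachable through Get, GetAll, or Walk.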
47func Wrapf(format string, err error) error {
48 outerMsg := "<nil>"
49 if err != nil {
50 outerMsg = err.Error()
51 }
52
53 outer := errors.New(strings.Replace(
54 format, "{{err}}", outerMsg, -1))
55
56 return Wrap(outer, err)
57}
58
59// Contains checks if the given error contains an error with the
60// message msg. If err is not a wrapped error, this will always return
61// false unless the error itself happens to match this msg.
62func Contains(err error, msg string) bool {
63 return len(GetAll(err, msg)) > 0
64}
65
66// ContainsType checks if the given error contains an error with
67// the same concrete type as v. If err is not a wrapped error, this will
68// check the err itself.
69func ContainsType(err error, v interface{}) bool {
70 return len(GetAllType(err, v)) > 0
71}
72
73// Get is the same as GetAll but returns the deepest matching error.
74func Get(err error, msg string) error {
75 es := GetAll(err, msg)
76 if len(es) > 0 {
77 return es[len(es)-1]
78 }
79
80 return nil
81}
82
83// GetType is the same as GetAllType but returns the deepest matching error.
84func GetType(err error, v interface{}) error {
85 es := GetAllType(err, v)
86 if len(es) > 0 {
87 return es[len(es)-1]
88 }
89
90 return nil
91}
92
93// GetAll gets all the errors that might be wrapped in err with the
94// given message. The order of the errors is such that the outermost
95// matching error (the most recent wrap) is index zero, and so on.
96func GetAll(err error, msg string) []error {
97 var result []error
98
99 Walk(err, func(err error) {
100 if err.Error() == msg {
101 result = append(result, err)
102 }
103 })
104
105 return result
106}
107
108// GetAllType gets all the errors that are the same type as v.
109//
110// The order of the return value is the same as described in GetAll.
111func GetAllType(err error, v interface{}) []error {
112 var result []error
113
114 var search string
115 if v != nil {
116 search = reflect.TypeOf(v).String()
117 }
118 Walk(err, func(err error) {
119 var needle string
120 if err != nil {
121 needle = reflect.TypeOf(err).String()
122 }
123
124 if needle == search {
125 result = append(result, err)
126 }
127 })
128
129 return result
130}
131
132// Walk walks all the wrapped errors in err and calls the callback. If
133// err isn't a wrapped error, the callback is called once with err. If err
134// is a wrapped error, the callback is called for both the wrapper that
135// implements error as well as the wrapped errors themselves.
136func Walk(err error, cb WalkFunc) {
137 if err == nil {
138 return
139 }
140
141 switch e := err.(type) {
142 case *wrappedError:
143 cb(e.Outer)
144 Walk(e.Inner, cb)
145 case Wrapper:
146 cb(err)
147
148 for _, err := range e.WrappedErrors() {
149 Walk(err, cb)
150 }
151 default:
152 cb(err)
153 }
154}
155
156// wrappedError is an implementation of error that has both the
157// outer and inner errors.
158type wrappedError struct {
159 Outer error
160 Inner error
161}
162
163func (w *wrappedError) Error() string {
164 return w.Outer.Error()
165}
166
167func (w *wrappedError) WrappedErrors() []error {
168 return []error{w.Outer, w.Inner}
169}
diff --git a/vendor/github.com/hashicorp/go-getter/LICENSE b/vendor/github.com/hashicorp/go-getter/LICENSE
new file mode 100644
index 0000000..c33dcc7
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/LICENSE
@@ -0,0 +1,354 @@
1Mozilla Public License, version 2.0
2
31. Definitions
4
51.1. “Contributor”
6
7 means each individual or legal entity that creates, contributes to the
8 creation of, or owns Covered Software.
9
101.2. “Contributor Version”
11
12 means the combination of the Contributions of others (if any) used by a
13 Contributor and that particular Contributor’s Contribution.
14
151.3. “Contribution”
16
17 means Covered Software of a particular Contributor.
18
191.4. “Covered Software”
20
21 means Source Code Form to which the initial Contributor has attached the
22 notice in Exhibit A, the Executable Form of such Source Code Form, and
23 Modifications of such Source Code Form, in each case including portions
24 thereof.
25
261.5. “Incompatible With Secondary Licenses”
27 means
28
29 a. that the initial Contributor has attached the notice described in
30 Exhibit B to the Covered Software; or
31
32 b. that the Covered Software was made available under the terms of version
33 1.1 or earlier of the License, but not also under the terms of a
34 Secondary License.
35
361.6. “Executable Form”
37
38 means any form of the work other than Source Code Form.
39
401.7. “Larger Work”
41
42 means a work that combines Covered Software with other material, in a separate
43 file or files, that is not Covered Software.
44
451.8. “License”
46
47 means this document.
48
491.9. “Licensable”
50
51 means having the right to grant, to the maximum extent possible, whether at the
52 time of the initial grant or subsequently, any and all of the rights conveyed by
53 this License.
54
551.10. “Modifications”
56
57 means any of the following:
58
59 a. any file in Source Code Form that results from an addition to, deletion
60 from, or modification of the contents of Covered Software; or
61
62 b. any new file in Source Code Form that contains any Covered Software.
63
641.11. “Patent Claims” of a Contributor
65
66 means any patent claim(s), including without limitation, method, process,
67 and apparatus claims, in any patent Licensable by such Contributor that
68 would be infringed, but for the grant of the License, by the making,
69 using, selling, offering for sale, having made, import, or transfer of
70 either its Contributions or its Contributor Version.
71
721.12. “Secondary License”
73
74 means either the GNU General Public License, Version 2.0, the GNU Lesser
75 General Public License, Version 2.1, the GNU Affero General Public
76 License, Version 3.0, or any later versions of those licenses.
77
781.13. “Source Code Form”
79
80 means the form of the work preferred for making modifications.
81
821.14. “You” (or “Your”)
83
84 means an individual or a legal entity exercising rights under this
85 License. For legal entities, “You” includes any entity that controls, is
86 controlled by, or is under common control with You. For purposes of this
87 definition, “control” means (a) the power, direct or indirect, to cause
88 the direction or management of such entity, whether by contract or
89 otherwise, or (b) ownership of more than fifty percent (50%) of the
90 outstanding shares or beneficial ownership of such entity.
91
92
932. License Grants and Conditions
94
952.1. Grants
96
97 Each Contributor hereby grants You a world-wide, royalty-free,
98 non-exclusive license:
99
100 a. under intellectual property rights (other than patent or trademark)
101 Licensable by such Contributor to use, reproduce, make available,
102 modify, display, perform, distribute, and otherwise exploit its
103 Contributions, either on an unmodified basis, with Modifications, or as
104 part of a Larger Work; and
105
106 b. under Patent Claims of such Contributor to make, use, sell, offer for
107 sale, have made, import, and otherwise transfer either its Contributions
108 or its Contributor Version.
109
1102.2. Effective Date
111
112 The licenses granted in Section 2.1 with respect to any Contribution become
113 effective for each Contribution on the date the Contributor first distributes
114 such Contribution.
115
1162.3. Limitations on Grant Scope
117
118 The licenses granted in this Section 2 are the only rights granted under this
119 License. No additional rights or licenses will be implied from the distribution
120 or licensing of Covered Software under this License. Notwithstanding Section
121 2.1(b) above, no patent license is granted by a Contributor:
122
123 a. for any code that a Contributor has removed from Covered Software; or
124
125 b. for infringements caused by: (i) Your and any other third party’s
126 modifications of Covered Software, or (ii) the combination of its
127 Contributions with other software (except as part of its Contributor
128 Version); or
129
130 c. under Patent Claims infringed by Covered Software in the absence of its
131 Contributions.
132
133 This License does not grant any rights in the trademarks, service marks, or
134 logos of any Contributor (except as may be necessary to comply with the
135 notice requirements in Section 3.4).
136
1372.4. Subsequent Licenses
138
139 No Contributor makes additional grants as a result of Your choice to
140 distribute the Covered Software under a subsequent version of this License
141 (see Section 10.2) or under the terms of a Secondary License (if permitted
142 under the terms of Section 3.3).
143
1442.5. Representation
145
146 Each Contributor represents that the Contributor believes its Contributions
147 are its original creation(s) or it has sufficient rights to grant the
148 rights to its Contributions conveyed by this License.
149
1502.6. Fair Use
151
152 This License is not intended to limit any rights You have under applicable
153 copyright doctrines of fair use, fair dealing, or other equivalents.
154
1552.7. Conditions
156
157 Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
158 Section 2.1.
159
160
1613. Responsibilities
162
1633.1. Distribution of Source Form
164
165 All distribution of Covered Software in Source Code Form, including any
166 Modifications that You create or to which You contribute, must be under the
167 terms of this License. You must inform recipients that the Source Code Form
168 of the Covered Software is governed by the terms of this License, and how
169 they can obtain a copy of this License. You may not attempt to alter or
170 restrict the recipients’ rights in the Source Code Form.
171
1723.2. Distribution of Executable Form
173
174 If You distribute Covered Software in Executable Form then:
175
176 a. such Covered Software must also be made available in Source Code Form,
177 as described in Section 3.1, and You must inform recipients of the
178 Executable Form how they can obtain a copy of such Source Code Form by
179 reasonable means in a timely manner, at a charge no more than the cost
180 of distribution to the recipient; and
181
182 b. You may distribute such Executable Form under the terms of this License,
183 or sublicense it under different terms, provided that the license for
184 the Executable Form does not attempt to limit or alter the recipients’
185 rights in the Source Code Form under this License.
186
1873.3. Distribution of a Larger Work
188
189 You may create and distribute a Larger Work under terms of Your choice,
190 provided that You also comply with the requirements of this License for the
191 Covered Software. If the Larger Work is a combination of Covered Software
192 with a work governed by one or more Secondary Licenses, and the Covered
193 Software is not Incompatible With Secondary Licenses, this License permits
194 You to additionally distribute such Covered Software under the terms of
195 such Secondary License(s), so that the recipient of the Larger Work may, at
196 their option, further distribute the Covered Software under the terms of
197 either this License or such Secondary License(s).
198
1993.4. Notices
200
201 You may not remove or alter the substance of any license notices (including
202 copyright notices, patent notices, disclaimers of warranty, or limitations
203 of liability) contained within the Source Code Form of the Covered
204 Software, except that You may alter any license notices to the extent
205 required to remedy known factual inaccuracies.
206
2073.5. Application of Additional Terms
208
209 You may choose to offer, and to charge a fee for, warranty, support,
210 indemnity or liability obligations to one or more recipients of Covered
211 Software. However, You may do so only on Your own behalf, and not on behalf
212 of any Contributor. You must make it absolutely clear that any such
213 warranty, support, indemnity, or liability obligation is offered by You
214 alone, and You hereby agree to indemnify every Contributor for any
215 liability incurred by such Contributor as a result of warranty, support,
216 indemnity or liability terms You offer. You may include additional
217 disclaimers of warranty and limitations of liability specific to any
218 jurisdiction.
219
2204. Inability to Comply Due to Statute or Regulation
221
222 If it is impossible for You to comply with any of the terms of this License
223 with respect to some or all of the Covered Software due to statute, judicial
224 order, or regulation then You must: (a) comply with the terms of this License
225 to the maximum extent possible; and (b) describe the limitations and the code
226 they affect. Such description must be placed in a text file included with all
227 distributions of the Covered Software under this License. Except to the
228 extent prohibited by statute or regulation, such description must be
229 sufficiently detailed for a recipient of ordinary skill to be able to
230 understand it.
231
2325. Termination
233
2345.1. The rights granted under this License will terminate automatically if You
235 fail to comply with any of its terms. However, if You become compliant,
236 then the rights granted under this License from a particular Contributor
237 are reinstated (a) provisionally, unless and until such Contributor
238 explicitly and finally terminates Your grants, and (b) on an ongoing basis,
239 if such Contributor fails to notify You of the non-compliance by some
240 reasonable means prior to 60 days after You have come back into compliance.
241 Moreover, Your grants from a particular Contributor are reinstated on an
242 ongoing basis if such Contributor notifies You of the non-compliance by
243 some reasonable means, this is the first time You have received notice of
244 non-compliance with this License from such Contributor, and You become
245 compliant prior to 30 days after Your receipt of the notice.
246
2475.2. If You initiate litigation against any entity by asserting a patent
248 infringement claim (excluding declaratory judgment actions, counter-claims,
249 and cross-claims) alleging that a Contributor Version directly or
250 indirectly infringes any patent, then the rights granted to You by any and
251 all Contributors for the Covered Software under Section 2.1 of this License
252 shall terminate.
253
2545.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
255 license agreements (excluding distributors and resellers) which have been
256 validly granted by You or Your distributors under this License prior to
257 termination shall survive termination.
258
2596. Disclaimer of Warranty
260
261 Covered Software is provided under this License on an “as is” basis, without
262 warranty of any kind, either expressed, implied, or statutory, including,
263 without limitation, warranties that the Covered Software is free of defects,
264 merchantable, fit for a particular purpose or non-infringing. The entire
265 risk as to the quality and performance of the Covered Software is with You.
266 Should any Covered Software prove defective in any respect, You (not any
267 Contributor) assume the cost of any necessary servicing, repair, or
268 correction. This disclaimer of warranty constitutes an essential part of this
269 License. No use of any Covered Software is authorized under this License
270 except under this disclaimer.
271
2727. Limitation of Liability
273
274 Under no circumstances and under no legal theory, whether tort (including
275 negligence), contract, or otherwise, shall any Contributor, or anyone who
276 distributes Covered Software as permitted above, be liable to You for any
277 direct, indirect, special, incidental, or consequential damages of any
278 character including, without limitation, damages for lost profits, loss of
279 goodwill, work stoppage, computer failure or malfunction, or any and all
280 other commercial damages or losses, even if such party shall have been
281 informed of the possibility of such damages. This limitation of liability
282 shall not apply to liability for death or personal injury resulting from such
283 party’s negligence to the extent applicable law prohibits such limitation.
284 Some jurisdictions do not allow the exclusion or limitation of incidental or
285 consequential damages, so this exclusion and limitation may not apply to You.
286
2878. Litigation
288
289 Any litigation relating to this License may be brought only in the courts of
290 a jurisdiction where the defendant maintains its principal place of business
291 and such litigation shall be governed by laws of that jurisdiction, without
292 reference to its conflict-of-law provisions. Nothing in this Section shall
293 prevent a party’s ability to bring cross-claims or counter-claims.
294
2959. Miscellaneous
296
297 This License represents the complete agreement concerning the subject matter
298 hereof. If any provision of this License is held to be unenforceable, such
299 provision shall be reformed only to the extent necessary to make it
300 enforceable. Any law or regulation which provides that the language of a
301 contract shall be construed against the drafter shall not be used to construe
302 this License against a Contributor.
303
304
30510. Versions of the License
306
30710.1. New Versions
308
309 Mozilla Foundation is the license steward. Except as provided in Section
310 10.3, no one other than the license steward has the right to modify or
311 publish new versions of this License. Each version will be given a
312 distinguishing version number.
313
31410.2. Effect of New Versions
315
316 You may distribute the Covered Software under the terms of the version of
317 the License under which You originally received the Covered Software, or
318 under the terms of any subsequent version published by the license
319 steward.
320
32110.3. Modified Versions
322
323 If you create software not governed by this License, and you want to
324 create a new license for such software, you may create and use a modified
325 version of this License if you rename the license and remove any
326 references to the name of the license steward (except to note that such
327 modified license differs from this License).
328
32910.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
330 If You choose to distribute Source Code Form that is Incompatible With
331 Secondary Licenses under the terms of this version of the License, the
332 notice described in Exhibit B of this License must be attached.
333
334Exhibit A - Source Code Form License Notice
335
336 This Source Code Form is subject to the
337 terms of the Mozilla Public License, v.
338 2.0. If a copy of the MPL was not
339 distributed with this file, You can
340 obtain one at
341 http://mozilla.org/MPL/2.0/.
342
343If it is not possible or desirable to put the notice in a particular file, then
344You may include the notice in a location (such as a LICENSE file in a relevant
345directory) where a recipient would be likely to look for such a notice.
346
347You may add additional accurate notices of copyright ownership.
348
349Exhibit B - “Incompatible With Secondary Licenses” Notice
350
351 This Source Code Form is “Incompatible
352 With Secondary Licenses”, as defined by
353 the Mozilla Public License, v. 2.0.
354
diff --git a/vendor/github.com/hashicorp/go-getter/README.md b/vendor/github.com/hashicorp/go-getter/README.md
new file mode 100644
index 0000000..4a0b6a6
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/README.md
@@ -0,0 +1,253 @@
1# go-getter
2
3[![Build Status](http://img.shields.io/travis/hashicorp/go-getter.svg?style=flat-square)][travis]
4[![Build status](https://ci.appveyor.com/api/projects/status/ulq3qr43n62croyq/branch/master?svg=true)][appveyor]
5[![Go Documentation](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)][godocs]
6
7[travis]: http://travis-ci.org/hashicorp/go-getter
8[godocs]: http://godoc.org/github.com/hashicorp/go-getter
9[appveyor]: https://ci.appveyor.com/project/hashicorp/go-getter/branch/master
10
11go-getter is a library for Go (golang) for downloading files or directories
12from various sources using a URL as the primary form of input.
13
14The power of this library is its flexibility: it can download from a
15number of different sources (file paths, Git, HTTP, Mercurial, etc.)
16using a single string as input. This removes the burden of knowing how
17to download from each kind of source from the implementer.
18
19The concept of a _detector_ automatically turns invalid URLs into proper
20URLs. For example: "github.com/hashicorp/go-getter" would turn into a
21Git URL. Or "./foo" would turn into a file URL. These are extensible.
22
23This library is used by [Terraform](https://terraform.io) for
24downloading modules, [Otto](https://ottoproject.io) for dependencies and
25Appfile imports, and [Nomad](https://nomadproject.io) for downloading
26binaries.
27
28## Installation and Usage
29
30Package documentation can be found on
31[GoDoc](http://godoc.org/github.com/hashicorp/go-getter).
32
33Installation can be done with a normal `go get`:
34
35```
36$ go get github.com/hashicorp/go-getter
37```
38
39go-getter also has a command you can use to test URL strings:
40
41```
42$ go install github.com/hashicorp/go-getter/cmd/go-getter
43...
44
45$ go-getter github.com/foo/bar ./foo
46...
47```
48
49The command is useful for verifying URL structures.
50
51## URL Format
52
53go-getter uses a single string URL as input to download from a variety of
54protocols. go-getter has various "tricks" with this URL to do certain things.
55This section documents the URL format.
56
57### Supported Protocols and Detectors
58
59**Protocols** are used to download files/directories using a specific
60mechanism. Example protocols are Git and HTTP.
61
62**Detectors** are used to transform a valid or invalid URL into another
63URL if it matches a certain pattern. Example: "github.com/user/repo" is
64automatically transformed into a fully valid Git URL. This allows go-getter
65to be very user friendly.
66
67go-getter supports the following protocols out of the box. Additional
68protocols can be added at runtime by implementing the `Getter` interface.
69
70 * Local files
71 * Git
72 * Mercurial
73 * HTTP
74 * Amazon S3
75
76In addition to the above protocols, go-getter has what are called "detectors."
77These take a URL and attempt to automatically choose the best protocol for
78it, which might involve even changing the protocol. The following detection
79is built-in by default (a code sketch follows this list):
80
81 * File paths such as "./foo" are automatically changed to absolute
82 file URLs.
83 * GitHub URLs, such as "github.com/mitchellh/vagrant" are automatically
84 changed to Git protocol over HTTP.
85 * BitBucket URLs, such as "bitbucket.org/mitchellh/vagrant" are automatically
86   changed to a Git or Mercurial protocol using the BitBucket API.
87
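To see what a detector produces without downloading anything, you can call
the detector chain directly. A minimal sketch using `Detect` and the default
`Detectors` (both names appear in `client.go` later in this diff); the
printed URL is indicative only:

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/hashicorp/go-getter"
)

func main() {
	pwd, err := os.Getwd()
	if err != nil {
		log.Fatal(err)
	}

	// Run the default detector chain over a shorthand source string.
	src, err := getter.Detect("github.com/hashicorp/go-getter", pwd, getter.Detectors)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(src) // e.g. git::https://github.com/hashicorp/go-getter.git
}
```
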
88### Forced Protocol
89
90In some cases, the protocol to use is ambiguous depending on the source
91URL. For example, "http://github.com/mitchellh/vagrant.git" could reference
92an HTTP URL or a Git URL. Forced protocol syntax is used to disambiguate this
93URL.
94
95Forced protocol can be done by prefixing the URL with the protocol followed
96by double colons. For example: `git::http://github.com/mitchellh/vagrant.git`
97would download the given HTTP URL using the Git protocol.
98
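In code, the same forced-protocol string can be handed to the `Client` type
defined in `client.go` later in this diff. A minimal sketch; the destination
path is arbitrary:

```go
package main

import (
	"log"

	"github.com/hashicorp/go-getter"
)

func main() {
	client := &getter.Client{
		// The git:: prefix forces the Git getter for this HTTP URL.
		Src:  "git::http://github.com/mitchellh/vagrant.git",
		Dst:  "./vagrant",
		Pwd:  ".",
		Mode: getter.ClientModeDir,
	}
	if err := client.Get(); err != nil {
		log.Fatal(err)
	}
}
```
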
99Forced protocols will also override any detectors.
100
101In the absence of a forced protocol, detectors may still run on the URL and
102transform the protocol anyway. The above example would've used the Git protocol
103either way, since the Git detector would've recognized it as a GitHub URL.
104
105### Protocol-Specific Options
106
107Each protocol can support protocol-specific options to configure that
108protocol. For example, the `git` protocol supports specifying a `ref`
109query parameter that tells it what ref to checkout for that Git
110repository.
111
112The options are specified as query parameters on the URL (or URL-like string)
113given to go-getter. Using the Git example above, the URL below is a valid
114input to go-getter:
115
116 github.com/hashicorp/go-getter?ref=abcd1234
117
118The protocol-specific options are documented below the URL format
119section. But because they are part of the URL, we point them out here so
120you know they exist.
121
122### Checksumming
123
124For file downloads of any protocol, go-getter can automatically verify
125a checksum for you. Note that checksumming only works for downloading files,
126not directories, but checksumming will work for any protocol.
127
128To checksum a file, append a `checksum` query parameter to the URL.
129The parameter value should be in the format `type:value`, where
130type is "md5", "sha1", "sha256", or "sha512". The "value" should be
131the actual checksum value. go-getter will parse out this query parameter
132automatically and use it to verify the checksum. An example URL
133is shown below:
134
135```
136./foo.txt?checksum=md5:b7d96c89d09d9e204f5fedc4d5d55b21
137```
138
139The checksum query parameter is never sent to the backend protocol
140implementation. It is used at a higher level by go-getter itself.
141
142### Unarchiving
143
144go-getter will automatically unarchive files into a file or directory
145based on the extension of the file being requested (over any protocol).
146This works for both file and directory downloads.
147
148go-getter looks for an `archive` query parameter to specify the format of
149the archive. If this isn't specified, go-getter will use the extension of
150the path to see if it appears archived. Unarchiving can be explicitly
151disabled by setting the `archive` query parameter to `false`.
152
153The following archive formats are supported:
154
155 * `tar.gz` and `tgz`
156 * `tar.bz2` and `tbz2`
157 * `zip`
158 * `gz`
159 * `bz2`
160
161For example, consider the URL below:
162
163```
164./foo.zip
165```
166
167This will automatically be inferred to be a ZIP file and will be extracted.
168You can also be explicit about the archive type:
169
170```
171./some/other/path?archive=zip
172```
173
174And finally, you can disable archiving completely:
175
176```
177./some/path?archive=false
178```
179
180You can combine unarchiving with the other features of go-getter such
181as checksumming. The special `archive` query parameter will be removed
182from the URL before going to the final protocol downloader.
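
You can, for instance, request unarchiving and carry a checksum in a single
URL (reusing the checksum value from the example above; the file name is
made up):

```
$ go-getter './module.zip?archive=zip&checksum=md5:b7d96c89d09d9e204f5fedc4d5d55b21' ./out
```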
183
184## Protocol-Specific Options
185
186This section documents the protocol-specific options that can be specified
187for go-getter. These options should be appended to the input as normal query
188parameters. Depending on the usage of go-getter, applications may provide
189alternate ways of inputting options. For example, [Nomad](https://www.nomadproject.io)
190provides a nice options block for specifying options rather than in the URL.
191
192## General (All Protocols)
193
194The options below are available to all protocols:
195
196 * `archive` - The archive format to use to unarchive this file, or "" (empty
197 string) to disable unarchiving. For more details, see the complete section
198 on archive support above.
199
200 * `checksum` - Checksum to verify the downloaded file or archive. See
201 the entire section on checksumming above for format and more details.
202
203### Local Files (`file`)
204
205None
206
207### Git (`git`)
208
209 * `ref` - The Git ref to checkout. This is a ref, so it can point to
210 a commit SHA, a branch name, etc. If it is a named ref such as a branch
211 name, go-getter will update it to the latest on each get.
212
213 * `sshkey` - An SSH private key to use during clones. The provided key must
214 be a base64-encoded string. For example, to generate a suitable `sshkey`
215 from a private key file on disk, you would run `base64 -w0 <file>`.
216
217 **Note**: Git 2.3+ is required to use this feature.
218
219### Mercurial (`hg`)
220
221 * `rev` - The Mercurial revision to checkout.
222
223### HTTP (`http`)
224
225None
226
227### S3 (`s3`)
228
229S3 takes various access configurations in the URL. Note that it will also
230read these from standard AWS environment variables if they're set. If
231the query parameters are present, these take priority.
232
233 * `aws_access_key_id` - AWS access key.
234 * `aws_access_key_secret` - AWS access key secret.
235 * `aws_access_token` - AWS access token if this is being used.
236
237#### Using IAM Instance Profiles with S3
238
239If you use go-getter and want to use an EC2 IAM Instance Profile to avoid
240using credentials, then just omit these parameters. The instance profile,
241if available, will be used automatically.
242
243#### S3 Bucket Examples
244
245S3 has several addressing schemes used to reference your bucket. These are
246listed here: http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro
247
248Some examples for these addressing schemes:
249- s3::https://s3.amazonaws.com/bucket/foo
250- s3::https://s3-eu-west-1.amazonaws.com/bucket/foo
251- bucket.s3.amazonaws.com/foo
252- bucket.s3-eu-west-1.amazonaws.com/foo/bar
253
diff --git a/vendor/github.com/hashicorp/go-getter/appveyor.yml b/vendor/github.com/hashicorp/go-getter/appveyor.yml
new file mode 100644
index 0000000..159dad4
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/appveyor.yml
@@ -0,0 +1,16 @@
1version: "build-{branch}-{build}"
2image: Visual Studio 2015
3clone_folder: c:\gopath\src\github.com\hashicorp\go-getter
4environment:
5 GOPATH: c:\gopath
6install:
7- cmd: >-
8 echo %Path%
9
10 go version
11
12 go env
13
14 go get -d -v -t ./...
15build_script:
16- cmd: go test -v ./...
diff --git a/vendor/github.com/hashicorp/go-getter/client.go b/vendor/github.com/hashicorp/go-getter/client.go
new file mode 100644
index 0000000..876812a
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/client.go
@@ -0,0 +1,335 @@
1package getter
2
3import (
4 "bytes"
5 "crypto/md5"
6 "crypto/sha1"
7 "crypto/sha256"
8 "crypto/sha512"
9 "encoding/hex"
10 "fmt"
11 "hash"
12 "io"
13 "io/ioutil"
14 "os"
15 "path/filepath"
16 "strconv"
17 "strings"
18
19 urlhelper "github.com/hashicorp/go-getter/helper/url"
20)
21
22// Client is a client for downloading things.
23//
24// Top-level functions such as Get are shortcuts for interacting with a client.
25// Using a client directly allows more fine-grained control over how downloading
26// is done, as well as customizing the protocols supported.
27type Client struct {
28 // Src is the source URL to get.
29 //
30 // Dst is the path to save the downloaded thing as. If Dir is set to
31 // true, then this should be a directory. If the directory doesn't exist,
32 // it will be created for you.
33 //
34 // Pwd is the working directory for detection. If this isn't set, some
35 // detection may fail. Client will not default pwd to the current
36 // working directory for security reasons.
37 Src string
38 Dst string
39 Pwd string
40
41 // Mode is the method of download the client will use. See ClientMode
42 // for documentation.
43 Mode ClientMode
44
45 // Detectors is the list of detectors that are tried on the source.
46 // If this is nil, then the default Detectors will be used.
47 Detectors []Detector
48
49 // Decompressors is the map of decompressors supported by this client.
50 // If this is nil, then the default value is the Decompressors global.
51 Decompressors map[string]Decompressor
52
53 // Getters is the map of protocols supported by this client. If this
54 // is nil, then the default Getters variable will be used.
55 Getters map[string]Getter
56
57 // Dir, if true, tells the Client it is downloading a directory (versus
58 // a single file). This distinction is necessary since filenames and
59 // directory names follow the same format so disambiguating is impossible
60 // without knowing ahead of time.
61 //
62 // WARNING: deprecated. If Mode is set, that will take precedence.
63 Dir bool
64}
65
66// Get downloads the configured source to the destination.
67func (c *Client) Get() error {
68 // Store this locally since there are cases we swap this
69 mode := c.Mode
70 if mode == ClientModeInvalid {
71 if c.Dir {
72 mode = ClientModeDir
73 } else {
74 mode = ClientModeFile
75 }
76 }
77
78 // Default decompressor value
79 decompressors := c.Decompressors
80 if decompressors == nil {
81 decompressors = Decompressors
82 }
83
84 // Detect the URL. This is safe if it is already detected.
85 detectors := c.Detectors
86 if detectors == nil {
87 detectors = Detectors
88 }
89 src, err := Detect(c.Src, c.Pwd, detectors)
90 if err != nil {
91 return err
92 }
93
94 // Determine if we have a forced protocol, i.e. "git::http://..."
95 force, src := getForcedGetter(src)
96
97 // If there is a subdir component, then we download the root separately
98 // and then copy over the proper subdir.
99 var realDst string
100 dst := c.Dst
101 src, subDir := SourceDirSubdir(src)
102 if subDir != "" {
103 tmpDir, err := ioutil.TempDir("", "tf")
104 if err != nil {
105 return err
106 }
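		// TempDir created the directory; remove it right away so dst does
		// not yet exist when the getter runs, and defer a final cleanup.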
107 if err := os.RemoveAll(tmpDir); err != nil {
108 return err
109 }
110 defer os.RemoveAll(tmpDir)
111
112 realDst = dst
113 dst = tmpDir
114 }
115
116 u, err := urlhelper.Parse(src)
117 if err != nil {
118 return err
119 }
120 if force == "" {
121 force = u.Scheme
122 }
123
124 getters := c.Getters
125 if getters == nil {
126 getters = Getters
127 }
128
129 g, ok := getters[force]
130 if !ok {
131 return fmt.Errorf(
132 "download not supported for scheme '%s'", force)
133 }
134
135 // We have magic query parameters that we use to signal different features
136 q := u.Query()
137
138 // Determine if we have an archive type
139 archiveV := q.Get("archive")
140 if archiveV != "" {
141		// Delete the parameter since it is a magic parameter we don't
142 // want to pass on to the Getter
143 q.Del("archive")
144 u.RawQuery = q.Encode()
145
146 // If we can parse the value as a bool and it is false, then
147 // set the archive to "-" which should never map to a decompressor
148 if b, err := strconv.ParseBool(archiveV); err == nil && !b {
149 archiveV = "-"
150 }
151 }
152 if archiveV == "" {
153		// No explicit archive parameter; is the format part of the filename?
154 matchingLen := 0
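		// Prefer the longest matching suffix so "foo.tar.gz" selects the
		// "tar.gz" decompressor rather than plain "gz".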
155		for k := range decompressors {
156 if strings.HasSuffix(u.Path, "."+k) && len(k) > matchingLen {
157 archiveV = k
158 matchingLen = len(k)
159 }
160 }
161 }
162
163 // If we have a decompressor, then we need to change the destination
164 // to download to a temporary path. We unarchive this into the final,
165 // real path.
166 var decompressDst string
167 var decompressDir bool
168 decompressor := decompressors[archiveV]
169 if decompressor != nil {
170 // Create a temporary directory to store our archive. We delete
171 // this at the end of everything.
172 td, err := ioutil.TempDir("", "getter")
173 if err != nil {
174 return fmt.Errorf(
175 "Error creating temporary directory for archive: %s", err)
176 }
177 defer os.RemoveAll(td)
178
179 // Swap the download directory to be our temporary path and
180 // store the old values.
181 decompressDst = dst
182 decompressDir = mode != ClientModeFile
183 dst = filepath.Join(td, "archive")
184 mode = ClientModeFile
185 }
186
187 // Determine if we have a checksum
188 var checksumHash hash.Hash
189 var checksumValue []byte
190 if v := q.Get("checksum"); v != "" {
191 // Delete the query parameter if we have it.
192 q.Del("checksum")
193 u.RawQuery = q.Encode()
194
195 // Determine the checksum hash type
196 checksumType := ""
197 idx := strings.Index(v, ":")
198 if idx > -1 {
199 checksumType = v[:idx]
200 }
201 switch checksumType {
202 case "md5":
203 checksumHash = md5.New()
204 case "sha1":
205 checksumHash = sha1.New()
206 case "sha256":
207 checksumHash = sha256.New()
208 case "sha512":
209 checksumHash = sha512.New()
210 default:
211 return fmt.Errorf(
212 "unsupported checksum type: %s", checksumType)
213 }
214
215 // Get the remainder of the value and parse it into bytes
216 b, err := hex.DecodeString(v[idx+1:])
217 if err != nil {
218 return fmt.Errorf("invalid checksum: %s", err)
219 }
220
221 // Set our value
222 checksumValue = b
223 }
224
225 if mode == ClientModeAny {
226 // Ask the getter which client mode to use
227 mode, err = g.ClientMode(u)
228 if err != nil {
229 return err
230 }
231
232 // Destination is the base name of the URL path in "any" mode when
233 // a file source is detected.
234 if mode == ClientModeFile {
235 dst = filepath.Join(dst, filepath.Base(u.Path))
236 }
237 }
238
239 // If we're not downloading a directory, then just download the file
240 // and return.
241 if mode == ClientModeFile {
242 err := g.GetFile(dst, u)
243 if err != nil {
244 return err
245 }
246
247 if checksumHash != nil {
248 if err := checksum(dst, checksumHash, checksumValue); err != nil {
249 return err
250 }
251 }
252
253 if decompressor != nil {
254 // We have a decompressor, so decompress the current destination
255 // into the final destination with the proper mode.
256 err := decompressor.Decompress(decompressDst, dst, decompressDir)
257 if err != nil {
258 return err
259 }
260
261 // Swap the information back
262 dst = decompressDst
263 if decompressDir {
264 mode = ClientModeAny
265 } else {
266 mode = ClientModeFile
267 }
268 }
269
270		// We check the mode again because it can be switched back
271		// if we were unarchiving. If we're still only Get-ing a file, then
272		// we're done.
273 if mode == ClientModeFile {
274 return nil
275 }
276 }
277
278 // If we're at this point we're either downloading a directory or we've
279 // downloaded and unarchived a directory and we're just checking subdir.
280		// In the case we have a decompressor we don't Get because the
281		// Get already happened above.
282 if decompressor == nil {
283 // If we're getting a directory, then this is an error. You cannot
284 // checksum a directory. TODO: test
285 if checksumHash != nil {
286 return fmt.Errorf(
287 "checksum cannot be specified for directory download")
288 }
289
290 // We're downloading a directory, which might require a bit more work
291 // if we're specifying a subdir.
292 err := g.Get(dst, u)
293 if err != nil {
294 err = fmt.Errorf("error downloading '%s': %s", src, err)
295 return err
296 }
297 }
298
299 // If we have a subdir, copy that over
300 if subDir != "" {
301 if err := os.RemoveAll(realDst); err != nil {
302 return err
303 }
304 if err := os.MkdirAll(realDst, 0755); err != nil {
305 return err
306 }
307
308 return copyDir(realDst, filepath.Join(dst, subDir), false)
309 }
310
311 return nil
312}
313
314// checksum is a simple method to compute the checksum of a source file
315// and compare it to the given expected value.
316func checksum(source string, h hash.Hash, v []byte) error {
317 f, err := os.Open(source)
318 if err != nil {
319 return fmt.Errorf("Failed to open file for checksum: %s", err)
320 }
321 defer f.Close()
322
323 if _, err := io.Copy(h, f); err != nil {
324 return fmt.Errorf("Failed to hash: %s", err)
325 }
326
327 if actual := h.Sum(nil); !bytes.Equal(actual, v) {
328 return fmt.Errorf(
329 "Checksums did not match.\nExpected: %s\nGot: %s",
330 hex.EncodeToString(v),
331 hex.EncodeToString(actual))
332 }
333
334 return nil
335}
diff --git a/vendor/github.com/hashicorp/go-getter/client_mode.go b/vendor/github.com/hashicorp/go-getter/client_mode.go
new file mode 100644
index 0000000..7f02509
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/client_mode.go
@@ -0,0 +1,24 @@
1package getter
2
3// ClientMode is the mode that the client operates in.
4type ClientMode uint
5
6const (
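	// ClientModeInvalid is the zero value; Client.Get treats it as unset
	// and falls back to the deprecated Dir field to choose a mode.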
7 ClientModeInvalid ClientMode = iota
8
9 // ClientModeAny downloads anything it can. In this mode, dst must
10 // be a directory. If src is a file, it is saved into the directory
11 // with the basename of the URL. If src is a directory or archive,
12 // it is unpacked directly into dst.
13 ClientModeAny
14
15 // ClientModeFile downloads a single file. In this mode, dst must
16 // be a file path (doesn't have to exist). src must point to a single
17 // file. It is saved as dst.
18 ClientModeFile
19
20 // ClientModeDir downloads a directory. In this mode, dst must be
21 // a directory path (doesn't have to exist). src must point to an
22 // archive or directory (such as in s3).
23 ClientModeDir
24)
diff --git a/vendor/github.com/hashicorp/go-getter/copy_dir.go b/vendor/github.com/hashicorp/go-getter/copy_dir.go
new file mode 100644
index 0000000..2f58e8a
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/copy_dir.go
@@ -0,0 +1,78 @@
1package getter
2
3import (
4 "io"
5 "os"
6 "path/filepath"
7 "strings"
8)
9
10// copyDir copies the src directory contents into dst. Both directories
11// should already exist.
12//
13// If ignoreDot is set to true, then dot-prefixed files/folders are ignored.
14func copyDir(dst string, src string, ignoreDot bool) error {
15 src, err := filepath.EvalSymlinks(src)
16 if err != nil {
17 return err
18 }
19
20 walkFn := func(path string, info os.FileInfo, err error) error {
21 if err != nil {
22 return err
23 }
24 if path == src {
25 return nil
26 }
27
28 if ignoreDot && strings.HasPrefix(filepath.Base(path), ".") {
29 // Skip any dot files
30 if info.IsDir() {
31 return filepath.SkipDir
32 } else {
33 return nil
34 }
35 }
36
37 // The "path" has the src prefixed to it. We need to join our
38 // destination with the path without the src on it.
39 dstPath := filepath.Join(dst, path[len(src):])
40
41 // If we have a directory, make that subdirectory, then continue
42 // the walk.
43 if info.IsDir() {
44 if path == filepath.Join(src, dst) {
45 // dst is in src; don't walk it.
46 return nil
47 }
48
49 if err := os.MkdirAll(dstPath, 0755); err != nil {
50 return err
51 }
52
53 return nil
54 }
55
56 // If we have a file, copy the contents.
57 srcF, err := os.Open(path)
58 if err != nil {
59 return err
60 }
61 defer srcF.Close()
62
63 dstF, err := os.Create(dstPath)
64 if err != nil {
65 return err
66 }
67 defer dstF.Close()
68
69 if _, err := io.Copy(dstF, srcF); err != nil {
70 return err
71 }
72
73 // Chmod it
74 return os.Chmod(dstPath, info.Mode())
75 }
76
77 return filepath.Walk(src, walkFn)
78}
diff --git a/vendor/github.com/hashicorp/go-getter/decompress.go b/vendor/github.com/hashicorp/go-getter/decompress.go
new file mode 100644
index 0000000..d18174c
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/decompress.go
@@ -0,0 +1,29 @@
1package getter
2
3// Decompressor defines the interface that must be implemented to add
4// support for decompressing a type.
5type Decompressor interface {
6 // Decompress should decompress src to dst. dir specifies whether dst
7 // is a directory or single file. src is guaranteed to be a single file
8 // that exists. dst is not guaranteed to exist already.
9 Decompress(dst, src string, dir bool) error
10}
11
12// Decompressors is the mapping of extension to the Decompressor implementation
13// that will decompress that extension/type.
14var Decompressors map[string]Decompressor
15
16func init() {
17 tbzDecompressor := new(TarBzip2Decompressor)
18 tgzDecompressor := new(TarGzipDecompressor)
19
20 Decompressors = map[string]Decompressor{
21 "bz2": new(Bzip2Decompressor),
22 "gz": new(GzipDecompressor),
23 "tar.bz2": tbzDecompressor,
24 "tar.gz": tgzDecompressor,
25 "tbz2": tbzDecompressor,
26 "tgz": tgzDecompressor,
27 "zip": new(ZipDecompressor),
28 }
29}
diff --git a/vendor/github.com/hashicorp/go-getter/decompress_bzip2.go b/vendor/github.com/hashicorp/go-getter/decompress_bzip2.go
new file mode 100644
index 0000000..339f4cf
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/decompress_bzip2.go
@@ -0,0 +1,45 @@
1package getter
2
3import (
4 "compress/bzip2"
5 "fmt"
6 "io"
7 "os"
8 "path/filepath"
9)
10
11// Bzip2Decompressor is an implementation of Decompressor that can
12// decompress bz2 files.
13type Bzip2Decompressor struct{}
14
15func (d *Bzip2Decompressor) Decompress(dst, src string, dir bool) error {
16 // Directory isn't supported at all
17 if dir {
18 return fmt.Errorf("bzip2-compressed files can only unarchive to a single file")
19 }
20
21	// Make sure the destination's parent directory exists first
22 if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil {
23 return err
24 }
25
26 // File first
27 f, err := os.Open(src)
28 if err != nil {
29 return err
30 }
31 defer f.Close()
32
33 // Bzip2 compression is second
34 bzipR := bzip2.NewReader(f)
35
36 // Copy it out
37 dstF, err := os.Create(dst)
38 if err != nil {
39 return err
40 }
41 defer dstF.Close()
42
43 _, err = io.Copy(dstF, bzipR)
44 return err
45}
diff --git a/vendor/github.com/hashicorp/go-getter/decompress_gzip.go b/vendor/github.com/hashicorp/go-getter/decompress_gzip.go
new file mode 100644
index 0000000..2001054
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/decompress_gzip.go
@@ -0,0 +1,49 @@
1package getter
2
3import (
4 "compress/gzip"
5 "fmt"
6 "io"
7 "os"
8 "path/filepath"
9)
10
11// GzipDecompressor is an implementation of Decompressor that can
12// decompress gzip files.
13type GzipDecompressor struct{}
14
15func (d *GzipDecompressor) Decompress(dst, src string, dir bool) error {
16 // Directory isn't supported at all
17 if dir {
18 return fmt.Errorf("gzip-compressed files can only unarchive to a single file")
19 }
20
21	// Make sure the destination's parent directory exists first
22 if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil {
23 return err
24 }
25
26 // File first
27 f, err := os.Open(src)
28 if err != nil {
29 return err
30 }
31 defer f.Close()
32
33 // gzip compression is second
34 gzipR, err := gzip.NewReader(f)
35 if err != nil {
36 return err
37 }
38 defer gzipR.Close()
39
40 // Copy it out
41 dstF, err := os.Create(dst)
42 if err != nil {
43 return err
44 }
45 defer dstF.Close()
46
47 _, err = io.Copy(dstF, gzipR)
48 return err
49}
diff --git a/vendor/github.com/hashicorp/go-getter/decompress_tbz2.go b/vendor/github.com/hashicorp/go-getter/decompress_tbz2.go
new file mode 100644
index 0000000..c46ed44
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/decompress_tbz2.go
@@ -0,0 +1,95 @@
1package getter
2
3import (
4 "archive/tar"
5 "compress/bzip2"
6 "fmt"
7 "io"
8 "os"
9 "path/filepath"
10)
11
12// TarBzip2Decompressor is an implementation of Decompressor that can
13// decompress tar.bz2 files.
14type TarBzip2Decompressor struct{}
15
16func (d *TarBzip2Decompressor) Decompress(dst, src string, dir bool) error {
17 // If we're going into a directory we should make that first
18 mkdir := dst
19 if !dir {
20 mkdir = filepath.Dir(dst)
21 }
22 if err := os.MkdirAll(mkdir, 0755); err != nil {
23 return err
24 }
25
26 // File first
27 f, err := os.Open(src)
28 if err != nil {
29 return err
30 }
31 defer f.Close()
32
33 // Bzip2 compression is second
34 bzipR := bzip2.NewReader(f)
35
36 // Once bzip decompressed we have a tar format
37 tarR := tar.NewReader(bzipR)
38 done := false
39 for {
40 hdr, err := tarR.Next()
41 if err == io.EOF {
42 if !done {
43 // Empty archive
44 return fmt.Errorf("empty archive: %s", src)
45 }
46
47 return nil
48 }
49 if err != nil {
50 return err
51 }
52
53 path := dst
54 if dir {
55 path = filepath.Join(path, hdr.Name)
56 }
57
58 if hdr.FileInfo().IsDir() {
59			if !dir {
60 return fmt.Errorf("expected a single file: %s", src)
61 }
62
63 // A directory, just make the directory and continue unarchiving...
64 if err := os.MkdirAll(path, 0755); err != nil {
65 return err
66 }
67
68 continue
69 }
70
71		// We have a file. If we already wrote one in single-file mode, error.
72 if !dir && done {
73 return fmt.Errorf("expected a single file, got multiple: %s", src)
74 }
75
76		// Mark that we're done so a second file in single-file mode errors
77 done = true
78
79 // Open the file for writing
80 dstF, err := os.Create(path)
81 if err != nil {
82 return err
83 }
84 _, err = io.Copy(dstF, tarR)
85 dstF.Close()
86 if err != nil {
87 return err
88 }
89
90 // Chmod the file
91 if err := os.Chmod(path, hdr.FileInfo().Mode()); err != nil {
92 return err
93 }
94 }
95}
diff --git a/vendor/github.com/hashicorp/go-getter/decompress_testing.go b/vendor/github.com/hashicorp/go-getter/decompress_testing.go
new file mode 100644
index 0000000..686d6c2
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/decompress_testing.go
@@ -0,0 +1,134 @@
1package getter
2
3import (
4 "crypto/md5"
5 "encoding/hex"
6 "io"
7 "io/ioutil"
8 "os"
9 "path/filepath"
10 "reflect"
11 "runtime"
12 "sort"
13 "strings"
14 "testing"
15)
16
17// TestDecompressCase is a single test case for testing decompressors
18type TestDecompressCase struct {
19 Input string // Input is the complete path to the input file
20 Dir bool // Dir is whether or not we're testing directory mode
21 Err bool // Err is whether we expect an error or not
22 DirList []string // DirList is the list of files for Dir mode
23 FileMD5 string // FileMD5 is the expected MD5 for a single file
24}
25
26// TestDecompressor is a helper function for testing generic decompressors.
27func TestDecompressor(t *testing.T, d Decompressor, cases []TestDecompressCase) {
28 for _, tc := range cases {
29 t.Logf("Testing: %s", tc.Input)
30
31 // Temporary dir to store stuff
32 td, err := ioutil.TempDir("", "getter")
33 if err != nil {
34 t.Fatalf("err: %s", err)
35 }
36
37 // Destination is always a joined subpath so that we test a brand-new path
38 dst := filepath.Join(td, "subdir", "result")
39
40 // We use a closure so the defers run at the end of each case
41 func() {
42 defer os.RemoveAll(td)
43
44 // Decompress
45 err := d.Decompress(dst, tc.Input, tc.Dir)
46 if (err != nil) != tc.Err {
47 t.Fatalf("err %s: %s", tc.Input, err)
48 }
49 if tc.Err {
50 return
51 }
52
53 // If it isn't a directory, then check for a single file
54 if !tc.Dir {
55 fi, err := os.Stat(dst)
56 if err != nil {
57 t.Fatalf("err %s: %s", tc.Input, err)
58 }
59 if fi.IsDir() {
60 t.Fatalf("err %s: expected file, got directory", tc.Input)
61 }
62 if tc.FileMD5 != "" {
63 actual := testMD5(t, dst)
64 expected := tc.FileMD5
65 if actual != expected {
66 t.Fatalf("err %s: expected MD5 %s, got %s", tc.Input, expected, actual)
67 }
68 }
69
70 return
71 }
72
73 // Convert expected for windows
74 expected := tc.DirList
75 if runtime.GOOS == "windows" {
76 for i, v := range expected {
77 expected[i] = strings.Replace(v, "/", "\\", -1)
78 }
79 }
80
81 // Directory, check for the correct contents
82 actual := testListDir(t, dst)
83 if !reflect.DeepEqual(actual, expected) {
84 t.Fatalf("bad %s\n\n%#v\n\n%#v", tc.Input, actual, expected)
85 }
86 }()
87 }
88}
89
90func testListDir(t *testing.T, path string) []string {
91 var result []string
92 err := filepath.Walk(path, func(sub string, info os.FileInfo, err error) error {
93 if err != nil {
94 return err
95 }
96
97 sub = strings.TrimPrefix(sub, path)
98 if sub == "" {
99 return nil
100 }
101 sub = sub[1:] // Trim the leading path sep.
102
103 // If it is a dir, add trailing sep
104 if info.IsDir() {
105 sub += "/"
106 }
107
108 result = append(result, sub)
109 return nil
110 })
111 if err != nil {
112 t.Fatalf("err: %s", err)
113 }
114
115 sort.Strings(result)
116 return result
117}
118
119func testMD5(t *testing.T, path string) string {
120 f, err := os.Open(path)
121 if err != nil {
122 t.Fatalf("err: %s", err)
123 }
124 defer f.Close()
125
126 h := md5.New()
127 _, err = io.Copy(h, f)
128 if err != nil {
129 t.Fatalf("err: %s", err)
130 }
131
132 result := h.Sum(nil)
133 return hex.EncodeToString(result)
134}
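A sketch of how this helper is meant to be driven from a decompressor's own _test.go file; the fixture paths, MD5, and directory listing below are illustrative assumptions:

package getter

import (
	"path/filepath"
	"testing"
)

func TestTarBzip2Decompressor(t *testing.T) {
	cases := []TestDecompressCase{
		{
			// Assumed fixture: an archive holding a single empty file.
			Input:   filepath.Join("./test-fixtures", "decompress-tbz2", "single.tar.bz2"),
			Dir:     false,
			FileMD5: "d41d8cd98f00b204e9800998ecf8427e", // MD5 of empty content
		},
		{
			// Assumed fixture: a small tree; a trailing "/" marks directories.
			Input:   filepath.Join("./test-fixtures", "decompress-tbz2", "tree.tar.bz2"),
			Dir:     true,
			DirList: []string{"A/", "A/a", "B/"},
		},
	}

	TestDecompressor(t, new(TarBzip2Decompressor), cases)
}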
diff --git a/vendor/github.com/hashicorp/go-getter/decompress_tgz.go b/vendor/github.com/hashicorp/go-getter/decompress_tgz.go
new file mode 100644
index 0000000..e8b1c31
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/decompress_tgz.go
@@ -0,0 +1,99 @@
1package getter
2
3import (
4 "archive/tar"
5 "compress/gzip"
6 "fmt"
7 "io"
8 "os"
9 "path/filepath"
10)
11
12// TarGzipDecompressor is an implementation of Decompressor that can
13// decompress tar.gz files.
14type TarGzipDecompressor struct{}
15
16func (d *TarGzipDecompressor) Decompress(dst, src string, dir bool) error {
17 // If we're going into a directory we should make that first
18 mkdir := dst
19 if !dir {
20 mkdir = filepath.Dir(dst)
21 }
22 if err := os.MkdirAll(mkdir, 0755); err != nil {
23 return err
24 }
25
26 // File first
27 f, err := os.Open(src)
28 if err != nil {
29 return err
30 }
31 defer f.Close()
32
33 // Gzip compression is second
34 gzipR, err := gzip.NewReader(f)
35 if err != nil {
36 return fmt.Errorf("Error opening a gzip reader for %s: %s", src, err)
37 }
38 defer gzipR.Close()
39
40 // Once gzip decompressed we have a tar format
41 tarR := tar.NewReader(gzipR)
42 done := false
43 for {
44 hdr, err := tarR.Next()
45 if err == io.EOF {
46 if !done {
47 // Empty archive
48 return fmt.Errorf("empty archive: %s", src)
49 }
50
51 return nil
52 }
53 if err != nil {
54 return err
55 }
56
57 path := dst
58 if dir {
59 path = filepath.Join(path, hdr.Name)
60 }
61
62 if hdr.FileInfo().IsDir() {
63 if !dir {
64 return fmt.Errorf("expected a single file: %s", src)
65 }
66
67 // A directory, just make the directory and continue unarchiving...
68 if err := os.MkdirAll(path, 0755); err != nil {
69 return err
70 }
71
72 continue
73 }
74
75 // We have a file. If we already decoded one in single-file mode, it's an error
76 if !dir && done {
77 return fmt.Errorf("expected a single file, got multiple: %s", src)
78 }
79
81 // Mark that we're done so that another file in single-file mode is an error
81 done = true
82
83 // Open the file for writing
84 dstF, err := os.Create(path)
85 if err != nil {
86 return err
87 }
88 _, err = io.Copy(dstF, tarR)
89 dstF.Close()
90 if err != nil {
91 return err
92 }
93
94 // Chmod the file
95 if err := os.Chmod(path, hdr.FileInfo().Mode()); err != nil {
96 return err
97 }
98 }
99}
diff --git a/vendor/github.com/hashicorp/go-getter/decompress_zip.go b/vendor/github.com/hashicorp/go-getter/decompress_zip.go
new file mode 100644
index 0000000..a065c07
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/decompress_zip.go
@@ -0,0 +1,96 @@
1package getter
2
3import (
4 "archive/zip"
5 "fmt"
6 "io"
7 "os"
8 "path/filepath"
9)
10
11// ZipDecompressor is an implementation of Decompressor that can
12// decompress zip files.
13type ZipDecompressor struct{}
14
15func (d *ZipDecompressor) Decompress(dst, src string, dir bool) error {
16 // If we're going into a directory we should make that first
17 mkdir := dst
18 if !dir {
19 mkdir = filepath.Dir(dst)
20 }
21 if err := os.MkdirAll(mkdir, 0755); err != nil {
22 return err
23 }
24
25 // Open the zip
26 zipR, err := zip.OpenReader(src)
27 if err != nil {
28 return err
29 }
30 defer zipR.Close()
31
32 // Check the zip integrity
33 if len(zipR.File) == 0 {
34 // Empty archive
35 return fmt.Errorf("empty archive: %s", src)
36 }
37 if !dir && len(zipR.File) > 1 {
38 return fmt.Errorf("expected a single file: %s", src)
39 }
40
41 // Go through and unarchive
42 for _, f := range zipR.File {
43 path := dst
44 if dir {
45 path = filepath.Join(path, f.Name)
46 }
47
48 if f.FileInfo().IsDir() {
49 if !dir {
50 return fmt.Errorf("expected a single file: %s", src)
51 }
52
53 // A directory, just make the directory and continue unarchiving...
54 if err := os.MkdirAll(path, 0755); err != nil {
55 return err
56 }
57
58 continue
59 }
60
61 // Create the enclosing directories if we must. ZIP files aren't
62 // required to contain entries for the directories themselves, so
63 // this can happen.
64 if dir {
65 if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
66 return err
67 }
68 }
69
70 // Open the file for reading
71 srcF, err := f.Open()
72 if err != nil {
73 return err
74 }
75
76 // Open the file for writing
77 dstF, err := os.Create(path)
78 if err != nil {
79 srcF.Close()
80 return err
81 }
82 _, err = io.Copy(dstF, srcF)
83 srcF.Close()
84 dstF.Close()
85 if err != nil {
86 return err
87 }
88
89 // Chmod the file
90 if err := os.Chmod(path, f.Mode()); err != nil {
91 return err
92 }
93 }
94
95 return nil
96}
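A self-contained sketch of the single-file path through ZipDecompressor: build a throwaway zip with archive/zip, then unpack it with dir=false (all paths live under a temp dir):

package main

import (
	"archive/zip"
	"fmt"
	"io/ioutil"
	"log"
	"os"
	"path/filepath"

	getter "github.com/hashicorp/go-getter"
)

func main() {
	td, err := ioutil.TempDir("", "zip-example")
	if err != nil {
		log.Fatal(err)
	}
	defer os.RemoveAll(td)

	// Write a zip that contains exactly one entry.
	src := filepath.Join(td, "single.zip")
	f, err := os.Create(src)
	if err != nil {
		log.Fatal(err)
	}
	zw := zip.NewWriter(f)
	w, err := zw.Create("hello.txt")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Fprint(w, "hello")
	if err := zw.Close(); err != nil {
		log.Fatal(err)
	}
	f.Close()

	// dir=false: the single entry is written to this exact path.
	dst := filepath.Join(td, "out", "hello.txt")
	if err := new(getter.ZipDecompressor).Decompress(dst, src, false); err != nil {
		log.Fatal(err)
	}

	data, err := ioutil.ReadFile(dst)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(data)) // hello
}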
diff --git a/vendor/github.com/hashicorp/go-getter/detect.go b/vendor/github.com/hashicorp/go-getter/detect.go
new file mode 100644
index 0000000..481b737
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/detect.go
@@ -0,0 +1,97 @@
1package getter
2
3import (
4 "fmt"
5 "path/filepath"
6
7 "github.com/hashicorp/go-getter/helper/url"
8)
9
10// Detector defines the interface that an invalid URL or a URL with a blank
11// scheme is passed through in order to determine if it's shorthand for
12// something else well-known.
13type Detector interface {
14 // Detect will detect whether the string matches a known pattern to
15 // turn it into a proper URL.
16 Detect(string, string) (string, bool, error)
17}
18
19// Detectors is the list of detectors that are tried on an invalid URL.
20// This is also the order they're tried (index 0 is first).
21var Detectors []Detector
22
23func init() {
24 Detectors = []Detector{
25 new(GitHubDetector),
26 new(BitBucketDetector),
27 new(S3Detector),
28 new(FileDetector),
29 }
30}
31
32// Detect turns a source string into another source string if it is
33// detected to be of a known pattern.
34//
35// The third parameter should be the list of detectors to use in the
36// order to try them. If you don't want to configure this, just use
37// the global Detectors variable.
38//
39// This is safe to be called with an already valid source string: Detect
40// will just return it.
41func Detect(src string, pwd string, ds []Detector) (string, error) {
42 getForce, getSrc := getForcedGetter(src)
43
44 // Separate out the subdir if there is one, we don't pass that to detect
45 getSrc, subDir := SourceDirSubdir(getSrc)
46
47 u, err := url.Parse(getSrc)
48 if err == nil && u.Scheme != "" {
49 // Valid URL
50 return src, nil
51 }
52
53 for _, d := range ds {
54 result, ok, err := d.Detect(getSrc, pwd)
55 if err != nil {
56 return "", err
57 }
58 if !ok {
59 continue
60 }
61
62 var detectForce string
63 detectForce, result = getForcedGetter(result)
64 result, detectSubdir := SourceDirSubdir(result)
65
66 // If we have a subdir from the detection, then prepend it to our
67 // requested subdir.
68 if detectSubdir != "" {
69 if subDir != "" {
70 subDir = filepath.Join(detectSubdir, subDir)
71 } else {
72 subDir = detectSubdir
73 }
74 }
75 if subDir != "" {
76 u, err := url.Parse(result)
77 if err != nil {
78 return "", fmt.Errorf("Error parsing URL: %s", err)
79 }
80 u.Path += "//" + subDir
81 result = u.String()
82 }
83
84 // Preserve the forced getter if it exists. We try to use the
85 // original set force first, followed by any force set by the
86 // detector.
87 if getForce != "" {
88 result = fmt.Sprintf("%s::%s", getForce, result)
89 } else if detectForce != "" {
90 result = fmt.Sprintf("%s::%s", detectForce, result)
91 }
92
93 return result, nil
94 }
95
96 return "", fmt.Errorf("invalid source string: %s", src)
97}
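A sketch of the rewrite Detect performs on a shorthand GitHub source with a //subdir component (the expected output follows from the detector and subdir logic above):

package main

import (
	"fmt"
	"log"

	getter "github.com/hashicorp/go-getter"
)

func main() {
	// pwd ("/tmp" here) only matters for relative file paths.
	src, err := getter.Detect(
		"github.com/hashicorp/terraform//modules/foo", "/tmp", getter.Detectors)
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println(src)
	// git::https://github.com/hashicorp/terraform.git//modules/foo
}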
diff --git a/vendor/github.com/hashicorp/go-getter/detect_bitbucket.go b/vendor/github.com/hashicorp/go-getter/detect_bitbucket.go
new file mode 100644
index 0000000..a183a17
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/detect_bitbucket.go
@@ -0,0 +1,66 @@
1package getter
2
3import (
4 "encoding/json"
5 "fmt"
6 "net/http"
7 "net/url"
8 "strings"
9)
10
11// BitBucketDetector implements Detector to detect BitBucket URLs and turn
12// them into URLs that the Git or Hg Getter can understand.
13type BitBucketDetector struct{}
14
15func (d *BitBucketDetector) Detect(src, _ string) (string, bool, error) {
16 if len(src) == 0 {
17 return "", false, nil
18 }
19
20 if strings.HasPrefix(src, "bitbucket.org/") {
21 return d.detectHTTP(src)
22 }
23
24 return "", false, nil
25}
26
27func (d *BitBucketDetector) detectHTTP(src string) (string, bool, error) {
28 u, err := url.Parse("https://" + src)
29 if err != nil {
30 return "", true, fmt.Errorf("error parsing BitBucket URL: %s", err)
31 }
32
33 // We need to get info on this BitBucket repository to determine whether
34 // it is Git or Hg.
35 var info struct {
36 SCM string `json:"scm"`
37 }
38 infoUrl := "https://api.bitbucket.org/1.0/repositories" + u.Path
39 resp, err := http.Get(infoUrl)
40 if err != nil {
41 return "", true, fmt.Errorf("error looking up BitBucket URL: %s", err)
42 }
43 if resp.StatusCode == 403 {
44 // A private repo
45 return "", true, fmt.Errorf(
46 "shorthand BitBucket URL can't be used for private repos, " +
47 "please use a full URL")
48 }
49 dec := json.NewDecoder(resp.Body)
50 if err := dec.Decode(&info); err != nil {
51 return "", true, fmt.Errorf("error looking up BitBucket URL: %s", err)
52 }
53
54 switch info.SCM {
55 case "git":
56 if !strings.HasSuffix(u.Path, ".git") {
57 u.Path += ".git"
58 }
59
60 return "git::" + u.String(), true, nil
61 case "hg":
62 return "hg::" + u.String(), true, nil
63 default:
64 return "", true, fmt.Errorf("unknown BitBucket SCM type: %s", info.SCM)
65 }
66}
diff --git a/vendor/github.com/hashicorp/go-getter/detect_file.go b/vendor/github.com/hashicorp/go-getter/detect_file.go
new file mode 100644
index 0000000..756ea43
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/detect_file.go
@@ -0,0 +1,67 @@
1package getter
2
3import (
4 "fmt"
5 "os"
6 "path/filepath"
7 "runtime"
8)
9
10// FileDetector implements Detector to detect file paths.
11type FileDetector struct{}
12
13func (d *FileDetector) Detect(src, pwd string) (string, bool, error) {
14 if len(src) == 0 {
15 return "", false, nil
16 }
17
18 if !filepath.IsAbs(src) {
19 if pwd == "" {
20 return "", true, fmt.Errorf(
21 "relative paths require a module with a pwd")
22 }
23
24 // Stat the pwd to determine if it's a symbolic link. If it is,
25 // then the pwd becomes the original directory. Otherwise,
26 // `filepath.Join` below does some weird stuff.
27 //
28 // We just ignore if the pwd doesn't exist. That error will be
29 // caught later when we try to use the URL.
30 if fi, err := os.Lstat(pwd); !os.IsNotExist(err) {
31 if err != nil {
32 return "", true, err
33 }
34 if fi.Mode()&os.ModeSymlink != 0 {
35 pwd, err = os.Readlink(pwd)
36 if err != nil {
37 return "", true, err
38 }
39
40 // The symlink itself might be a relative path, so we have to
41 // resolve this to have a correctly rooted URL.
42 pwd, err = filepath.Abs(pwd)
43 if err != nil {
44 return "", true, err
45 }
46 }
47 }
48
49 src = filepath.Join(pwd, src)
50 }
51
52 return fmtFileURL(src), true, nil
53}
54
55func fmtFileURL(path string) string {
56 if runtime.GOOS == "windows" {
57 // Make sure we're using "/" on Windows. URLs are "/"-based.
58 path = filepath.ToSlash(path)
59 return fmt.Sprintf("file://%s", path)
60 }
61
62 // Make sure that we don't start with "/" since we add that below.
63 if path[0] == '/' {
64 path = path[1:]
65 }
66 return fmt.Sprintf("file:///%s", path)
67}
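A sketch of FileDetector on a relative path (the directories are placeholders; on a Unix host the result is a file:/// URL rooted at pwd):

package main

import (
	"fmt"
	"log"

	getter "github.com/hashicorp/go-getter"
)

func main() {
	d := new(getter.FileDetector)

	out, ok, err := d.Detect("./modules/foo", "/home/user/project")
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println(ok, out)
	// true file:///home/user/project/modules/foo
}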
diff --git a/vendor/github.com/hashicorp/go-getter/detect_github.go b/vendor/github.com/hashicorp/go-getter/detect_github.go
new file mode 100644
index 0000000..c084ad9
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/detect_github.go
@@ -0,0 +1,73 @@
1package getter
2
3import (
4 "fmt"
5 "net/url"
6 "strings"
7)
8
9// GitHubDetector implements Detector to detect GitHub URLs and turn
10// them into URLs that the Git Getter can understand.
11type GitHubDetector struct{}
12
13func (d *GitHubDetector) Detect(src, _ string) (string, bool, error) {
14 if len(src) == 0 {
15 return "", false, nil
16 }
17
18 if strings.HasPrefix(src, "github.com/") {
19 return d.detectHTTP(src)
20 } else if strings.HasPrefix(src, "git@github.com:") {
21 return d.detectSSH(src)
22 }
23
24 return "", false, nil
25}
26
27func (d *GitHubDetector) detectHTTP(src string) (string, bool, error) {
28 parts := strings.Split(src, "/")
29 if len(parts) < 3 {
30 return "", false, fmt.Errorf(
31 "GitHub URLs should be github.com/username/repo")
32 }
33
34 urlStr := fmt.Sprintf("https://%s", strings.Join(parts[:3], "/"))
35 url, err := url.Parse(urlStr)
36 if err != nil {
37 return "", true, fmt.Errorf("error parsing GitHub URL: %s", err)
38 }
39
40 if !strings.HasSuffix(url.Path, ".git") {
41 url.Path += ".git"
42 }
43
44 if len(parts) > 3 {
45 url.Path += "//" + strings.Join(parts[3:], "/")
46 }
47
48 return "git::" + url.String(), true, nil
49}
50
51func (d *GitHubDetector) detectSSH(src string) (string, bool, error) {
52 idx := strings.Index(src, ":")
53 qidx := strings.Index(src, "?")
54 if qidx == -1 {
55 qidx = len(src)
56 }
57
58 var u url.URL
59 u.Scheme = "ssh"
60 u.User = url.User("git")
61 u.Host = "github.com"
62 u.Path = src[idx+1 : qidx]
63 if qidx < len(src) {
64 q, err := url.ParseQuery(src[qidx+1:])
65 if err != nil {
66 return "", true, fmt.Errorf("error parsing GitHub SSH URL: %s", err)
67 }
68
69 u.RawQuery = q.Encode()
70 }
71
72 return "git::" + u.String(), true, nil
73}
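A sketch of both GitHubDetector paths, HTTP shorthand and SSH shorthand (the expected outputs follow from the code above):

package main

import (
	"fmt"
	"log"

	getter "github.com/hashicorp/go-getter"
)

func main() {
	d := new(getter.GitHubDetector)

	out, _, err := d.Detect("github.com/hashicorp/go-getter", "")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(out) // git::https://github.com/hashicorp/go-getter.git

	out, _, err = d.Detect("git@github.com:hashicorp/go-getter.git?ref=abc123", "")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(out) // git::ssh://git@github.com/hashicorp/go-getter.git?ref=abc123
}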
diff --git a/vendor/github.com/hashicorp/go-getter/detect_s3.go b/vendor/github.com/hashicorp/go-getter/detect_s3.go
new file mode 100644
index 0000000..8e0f4a0
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/detect_s3.go
@@ -0,0 +1,61 @@
1package getter
2
3import (
4 "fmt"
5 "net/url"
6 "strings"
7)
8
9// S3Detector implements Detector to detect S3 URLs and turn
10// them into URLs that the S3 getter can understand.
11type S3Detector struct{}
12
13func (d *S3Detector) Detect(src, _ string) (string, bool, error) {
14 if len(src) == 0 {
15 return "", false, nil
16 }
17
18 if strings.Contains(src, ".amazonaws.com/") {
19 return d.detectHTTP(src)
20 }
21
22 return "", false, nil
23}
24
25func (d *S3Detector) detectHTTP(src string) (string, bool, error) {
26 parts := strings.Split(src, "/")
27 if len(parts) < 2 {
28 return "", false, fmt.Errorf(
29 "URL is not a valid S3 URL")
30 }
31
32 hostParts := strings.Split(parts[0], ".")
33 if len(hostParts) == 3 {
34 return d.detectPathStyle(hostParts[0], parts[1:])
35 } else if len(hostParts) == 4 {
36 return d.detectVhostStyle(hostParts[1], hostParts[0], parts[1:])
37 } else {
38 return "", false, fmt.Errorf(
39 "URL is not a valid S3 URL")
40 }
41}
42
43func (d *S3Detector) detectPathStyle(region string, parts []string) (string, bool, error) {
44 urlStr := fmt.Sprintf("https://%s.amazonaws.com/%s", region, strings.Join(parts, "/"))
45 url, err := url.Parse(urlStr)
46 if err != nil {
47 return "", false, fmt.Errorf("error parsing S3 URL: %s", err)
48 }
49
50 return "s3::" + url.String(), true, nil
51}
52
53func (d *S3Detector) detectVhostStyle(region, bucket string, parts []string) (string, bool, error) {
54 urlStr := fmt.Sprintf("https://%s.amazonaws.com/%s/%s", region, bucket, strings.Join(parts, "/"))
55 url, err := url.Parse(urlStr)
56 if err != nil {
57 return "", false, fmt.Errorf("error parsing S3 URL: %s", err)
58 }
59
60 return "s3::" + url.String(), true, nil
61}
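A sketch of both host layouts the S3 detector accepts; both normalize to the same s3:: URL (bucket and key names are placeholders):

package main

import (
	"fmt"
	"log"

	getter "github.com/hashicorp/go-getter"
)

func main() {
	d := new(getter.S3Detector)

	// Path style: the bucket is the first path segment.
	out, _, err := d.Detect("s3.amazonaws.com/bucket/foo/bar", "")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(out) // s3::https://s3.amazonaws.com/bucket/foo/bar

	// Virtual-host style: the bucket is the first host label and is
	// folded back into the path.
	out, _, err = d.Detect("bucket.s3.amazonaws.com/foo/bar", "")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(out) // s3::https://s3.amazonaws.com/bucket/foo/bar
}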
diff --git a/vendor/github.com/hashicorp/go-getter/folder_storage.go b/vendor/github.com/hashicorp/go-getter/folder_storage.go
new file mode 100644
index 0000000..647ccf4
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/folder_storage.go
@@ -0,0 +1,65 @@
1package getter
2
3import (
4 "crypto/md5"
5 "encoding/hex"
6 "fmt"
7 "os"
8 "path/filepath"
9)
10
11// FolderStorage is an implementation of the Storage interface that manages
12// modules on the disk.
13type FolderStorage struct {
14 // StorageDir is the directory where the modules will be stored.
15 StorageDir string
16}
17
18// Dir implements Storage.Dir
19func (s *FolderStorage) Dir(key string) (d string, e bool, err error) {
20 d = s.dir(key)
21 _, err = os.Stat(d)
22 if err == nil {
23 // Directory exists
24 e = true
25 return
26 }
27 if os.IsNotExist(err) {
28 // Directory doesn't exist
29 d = ""
30 e = false
31 err = nil
32 return
33 }
34
35 // An error
36 d = ""
37 e = false
38 return
39}
40
41// Get implements Storage.Get
42func (s *FolderStorage) Get(key string, source string, update bool) error {
43 dir := s.dir(key)
44 if !update {
45 if _, err := os.Stat(dir); err == nil {
46 // If the directory already exists, then we're done since
47 // we're not updating.
48 return nil
49 } else if !os.IsNotExist(err) {
50 // If the error we got wasn't a file-not-exist error, then
51 // something went wrong and we should report it.
52 return fmt.Errorf("Error reading module directory: %s", err)
53 }
54 }
55
56 // Get the source. This always forces an update.
57 return Get(dir, source)
58}
59
60// dir returns the directory name that we'll use internally to map the
61// given key to a path on disk.
62func (s *FolderStorage) dir(key string) string {
63 sum := md5.Sum([]byte(key))
64 return filepath.Join(s.StorageDir, hex.EncodeToString(sum[:]))
65}
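A sketch of the storage flow: Get fills (or reuses) an MD5(key)-derived subdirectory, and Dir reports where it landed. StorageDir and the source are placeholder assumptions:

package main

import (
	"fmt"
	"log"

	getter "github.com/hashicorp/go-getter"
)

func main() {
	s := &getter.FolderStorage{StorageDir: "/tmp/module-cache"}

	const key = "root.network" // any stable identifier for the module

	// update=false: an already-downloaded copy is reused as-is.
	if err := s.Get(key, "github.com/hashicorp/go-getter", false); err != nil {
		log.Fatal(err)
	}

	dir, found, err := s.Dir(key)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(found, dir)
}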
diff --git a/vendor/github.com/hashicorp/go-getter/get.go b/vendor/github.com/hashicorp/go-getter/get.go
new file mode 100644
index 0000000..c3236f5
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/get.go
@@ -0,0 +1,139 @@
1// getter is a package for downloading files or directories from a variety of
2// protocols.
3//
4// getter is unique in its ability to download both directories and files.
5// It also detects certain source strings to be protocol-specific URLs. For
6// example, "github.com/hashicorp/go-getter" would turn into a Git URL and
7// use the Git protocol.
8//
9// Protocols and detectors are extensible.
10//
11// To get started, see Client.
12package getter
13
14import (
15 "bytes"
16 "fmt"
17 "net/url"
18 "os/exec"
19 "regexp"
20 "syscall"
21)
22
23// Getter defines the interface that schemes must implement to download
24// things.
25type Getter interface {
26 // Get downloads the given URL into the given directory. This always
27 // assumes that we're updating and gets the latest version that it can.
28 //
29 // The directory may already exist (if we're updating). If it is in a
30 // format that isn't understood, an error should be returned. Get shouldn't
31 // simply nuke the directory.
32 Get(string, *url.URL) error
33
34 // GetFile downloads the given URL into the given path. The URL must
35 // reference a single file. If possible, the Getter should check if
36 // the remote end contains the same file and no-op this operation.
37 GetFile(string, *url.URL) error
38
39 // ClientMode returns the mode based on the given URL. This is used to
40 // allow clients to let the getters decide which mode to use.
41 ClientMode(*url.URL) (ClientMode, error)
42}
43
44// Getters is the mapping of scheme to the Getter implementation that will
45// be used to get a dependency.
46var Getters map[string]Getter
47
48// forcedRegexp is the regular expression that finds forced getters. This
49// syntax is scheme::url, example: git::https://foo.com
50var forcedRegexp = regexp.MustCompile(`^([A-Za-z0-9]+)::(.+)$`)
51
52func init() {
53 httpGetter := &HttpGetter{Netrc: true}
54
55 Getters = map[string]Getter{
56 "file": new(FileGetter),
57 "git": new(GitGetter),
58 "hg": new(HgGetter),
59 "s3": new(S3Getter),
60 "http": httpGetter,
61 "https": httpGetter,
62 }
63}
64
65// Get downloads the directory specified by src into the folder specified by
66// dst. If dst already exists, Get will attempt to update it.
67//
68// src is a URL, whereas dst is always just a file path to a folder. This
69// folder doesn't need to exist. It will be created if it doesn't exist.
70func Get(dst, src string) error {
71 return (&Client{
72 Src: src,
73 Dst: dst,
74 Dir: true,
75 Getters: Getters,
76 }).Get()
77}
78
79// GetAny downloads a URL into the given destination. Unlike Get or
80// GetFile, both directories and files are supported.
81//
82// dst must be a directory. If src is a file, it will be downloaded
83// into dst with the basename of the URL. If src is a directory or
84// archive, it will be unpacked directly into dst.
85func GetAny(dst, src string) error {
86 return (&Client{
87 Src: src,
88 Dst: dst,
89 Mode: ClientModeAny,
90 Getters: Getters,
91 }).Get()
92}
93
94// GetFile downloads the file specified by src into the path specified by
95// dst.
96func GetFile(dst, src string) error {
97 return (&Client{
98 Src: src,
99 Dst: dst,
100 Dir: false,
101 Getters: Getters,
102 }).Get()
103}
104
105// getRunCommand is a helper that will run a command and capture the output
106// in case an error happens.
107func getRunCommand(cmd *exec.Cmd) error {
108 var buf bytes.Buffer
109 cmd.Stdout = &buf
110 cmd.Stderr = &buf
111 err := cmd.Run()
112 if err == nil {
113 return nil
114 }
115 if exiterr, ok := err.(*exec.ExitError); ok {
116 // The program has exited with an exit code != 0
117 if status, ok := exiterr.Sys().(syscall.WaitStatus); ok {
118 return fmt.Errorf(
119 "%s exited with %d: %s",
120 cmd.Path,
121 status.ExitStatus(),
122 buf.String())
123 }
124 }
125
126 return fmt.Errorf("error running %s: %s", cmd.Path, buf.String())
127}
128
129// getForcedGetter takes a source and returns the tuple of the forced
130// getter and the raw URL (without the force syntax).
131func getForcedGetter(src string) (string, string) {
132 var forced string
133 if ms := forcedRegexp.FindStringSubmatch(src); ms != nil {
134 forced = ms[1]
135 src = ms[2]
136 }
137
138 return forced, src
139}
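A sketch contrasting the forced-getter syntax with plain scheme dispatch (the destination paths and URLs are illustrative):

package main

import (
	"log"

	getter "github.com/hashicorp/go-getter"
)

func main() {
	// "git::" pins the getter regardless of the URL scheme; everything
	// after "::" is handed to the git getter.
	if err := getter.Get("./repo", "git::https://github.com/hashicorp/go-getter.git"); err != nil {
		log.Fatal(err)
	}

	// With no force prefix, the scheme ("https") picks the getter from
	// the Getters table above.
	err := getter.GetFile(
		"./LICENSE",
		"https://raw.githubusercontent.com/hashicorp/go-getter/master/LICENSE")
	if err != nil {
		log.Fatal(err)
	}
}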
diff --git a/vendor/github.com/hashicorp/go-getter/get_file.go b/vendor/github.com/hashicorp/go-getter/get_file.go
new file mode 100644
index 0000000..e5d2d61
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/get_file.go
@@ -0,0 +1,32 @@
1package getter
2
3import (
4 "net/url"
5 "os"
6)
7
8// FileGetter is a Getter implementation that will download a module from
9// a file scheme.
10type FileGetter struct {
11 // Copy, if set to true, will copy data instead of using a symlink
12 Copy bool
13}
14
15func (g *FileGetter) ClientMode(u *url.URL) (ClientMode, error) {
16 path := u.Path
17 if u.RawPath != "" {
18 path = u.RawPath
19 }
20
21 fi, err := os.Stat(path)
22 if err != nil {
23 return 0, err
24 }
25
26 // Check if the source is a directory.
27 if fi.IsDir() {
28 return ClientModeDir, nil
29 }
30
31 return ClientModeFile, nil
32}
diff --git a/vendor/github.com/hashicorp/go-getter/get_file_unix.go b/vendor/github.com/hashicorp/go-getter/get_file_unix.go
new file mode 100644
index 0000000..c89a2d5
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/get_file_unix.go
@@ -0,0 +1,103 @@
1// +build !windows
2
3package getter
4
5import (
6 "fmt"
7 "io"
8 "net/url"
9 "os"
10 "path/filepath"
11)
12
13func (g *FileGetter) Get(dst string, u *url.URL) error {
14 path := u.Path
15 if u.RawPath != "" {
16 path = u.RawPath
17 }
18
19 // The source path must exist and be a directory to be usable.
20 if fi, err := os.Stat(path); err != nil {
21 return fmt.Errorf("source path error: %s", err)
22 } else if !fi.IsDir() {
23 return fmt.Errorf("source path must be a directory")
24 }
25
26 fi, err := os.Lstat(dst)
27 if err != nil && !os.IsNotExist(err) {
28 return err
29 }
30
31 // If the destination already exists, it must be a symlink
32 if err == nil {
33 mode := fi.Mode()
34 if mode&os.ModeSymlink == 0 {
35 return fmt.Errorf("destination exists and is not a symlink")
36 }
37
38 // Remove the destination
39 if err := os.Remove(dst); err != nil {
40 return err
41 }
42 }
43
44 // Create all the parent directories
45 if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil {
46 return err
47 }
48
49 return os.Symlink(path, dst)
50}
51
52func (g *FileGetter) GetFile(dst string, u *url.URL) error {
53 path := u.Path
54 if u.RawPath != "" {
55 path = u.RawPath
56 }
57
58 // The source path must exist and be a file to be usable.
59 if fi, err := os.Stat(path); err != nil {
60 return fmt.Errorf("source path error: %s", err)
61 } else if fi.IsDir() {
62 return fmt.Errorf("source path must be a file")
63 }
64
65 _, err := os.Lstat(dst)
66 if err != nil && !os.IsNotExist(err) {
67 return err
68 }
69
70 // If the destination already exists, it must be a symlink
71 if err == nil {
72 // Remove the destination
73 if err := os.Remove(dst); err != nil {
74 return err
75 }
76 }
77
78 // Create all the parent directories
79 if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil {
80 return err
81 }
82
83 // If we're not copying, just symlink and we're done
84 if !g.Copy {
85 return os.Symlink(path, dst)
86 }
87
88 // Copy
89 srcF, err := os.Open(path)
90 if err != nil {
91 return err
92 }
93 defer srcF.Close()
94
95 dstF, err := os.Create(dst)
96 if err != nil {
97 return err
98 }
99 defer dstF.Close()
100
101 _, err = io.Copy(dstF, srcF)
102 return err
103}
diff --git a/vendor/github.com/hashicorp/go-getter/get_file_windows.go b/vendor/github.com/hashicorp/go-getter/get_file_windows.go
new file mode 100644
index 0000000..f87ed0a
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/get_file_windows.go
@@ -0,0 +1,120 @@
1// +build windows
2
3package getter
4
5import (
6 "fmt"
7 "io"
8 "net/url"
9 "os"
10 "os/exec"
11 "path/filepath"
12 "strings"
13)
14
15func (g *FileGetter) Get(dst string, u *url.URL) error {
16 path := u.Path
17 if u.RawPath != "" {
18 path = u.RawPath
19 }
20
21 // The source path must exist and be a directory to be usable.
22 if fi, err := os.Stat(path); err != nil {
23 return fmt.Errorf("source path error: %s", err)
24 } else if !fi.IsDir() {
25 return fmt.Errorf("source path must be a directory")
26 }
27
28 fi, err := os.Lstat(dst)
29 if err != nil && !os.IsNotExist(err) {
30 return err
31 }
32
33 // If the destination already exists, it must be a symlink
34 if err == nil {
35 mode := fi.Mode()
36 if mode&os.ModeSymlink == 0 {
37 return fmt.Errorf("destination exists and is not a symlink")
38 }
39
40 // Remove the destination
41 if err := os.Remove(dst); err != nil {
42 return err
43 }
44 }
45
46 // Create all the parent directories
47 if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil {
48 return err
49 }
50
51 sourcePath := toBackslash(path)
52
53 // Use mklink to create a junction point
54 output, err := exec.Command("cmd", "/c", "mklink", "/J", dst, sourcePath).CombinedOutput()
55 if err != nil {
56 return fmt.Errorf("failed to run mklink %v %v: %v %q", dst, sourcePath, err, output)
57 }
58
59 return nil
60}
61
62func (g *FileGetter) GetFile(dst string, u *url.URL) error {
63 path := u.Path
64 if u.RawPath != "" {
65 path = u.RawPath
66 }
67
68 // The source path must exist and be a file to be usable.
69 if fi, err := os.Stat(path); err != nil {
70 return fmt.Errorf("source path error: %s", err)
71 } else if fi.IsDir() {
72 return fmt.Errorf("source path must be a file")
73 }
74
75 _, err := os.Lstat(dst)
76 if err != nil && !os.IsNotExist(err) {
77 return err
78 }
79
80 // If the destination already exists, it must be a symlink
81 if err == nil {
82 // Remove the destination
83 if err := os.Remove(dst); err != nil {
84 return err
85 }
86 }
87
88 // Create all the parent directories
89 if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil {
90 return err
91 }
92
93 // If we're not copying, just symlink and we're done
94 if !g.Copy {
95 return os.Symlink(path, dst)
96 }
97
98 // Copy
99 srcF, err := os.Open(path)
100 if err != nil {
101 return err
102 }
103 defer srcF.Close()
104
105 dstF, err := os.Create(dst)
106 if err != nil {
107 return err
108 }
109 defer dstF.Close()
110
111 _, err = io.Copy(dstF, srcF)
112 return err
113}
114
115// toBackslash returns the result of replacing each slash character
116// in path with a backslash ('\') character. Multiple separators are
117// replaced by multiple backslashes.
118func toBackslash(path string) string {
119 return strings.Replace(path, "/", "\\", -1)
120}
diff --git a/vendor/github.com/hashicorp/go-getter/get_git.go b/vendor/github.com/hashicorp/go-getter/get_git.go
new file mode 100644
index 0000000..0728139
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/get_git.go
@@ -0,0 +1,225 @@
1package getter
2
3import (
4 "encoding/base64"
5 "fmt"
6 "io/ioutil"
7 "net/url"
8 "os"
9 "os/exec"
10 "path/filepath"
11 "strings"
12
13 urlhelper "github.com/hashicorp/go-getter/helper/url"
14 "github.com/hashicorp/go-version"
15)
16
17// GitGetter is a Getter implementation that will download a module from
18// a git repository.
19type GitGetter struct{}
20
21func (g *GitGetter) ClientMode(_ *url.URL) (ClientMode, error) {
22 return ClientModeDir, nil
23}
24
25func (g *GitGetter) Get(dst string, u *url.URL) error {
26 if _, err := exec.LookPath("git"); err != nil {
27 return fmt.Errorf("git must be available and on the PATH")
28 }
29
30 // Extract some query parameters we use
31 var ref, sshKey string
32 q := u.Query()
33 if len(q) > 0 {
34 ref = q.Get("ref")
35 q.Del("ref")
36
37 sshKey = q.Get("sshkey")
38 q.Del("sshkey")
39
40 // Copy the URL
41 var newU url.URL = *u
42 u = &newU
43 u.RawQuery = q.Encode()
44 }
45
46 var sshKeyFile string
47 if sshKey != "" {
48 // Check that the git version is sufficiently new.
49 if err := checkGitVersion("2.3"); err != nil {
50 return fmt.Errorf("Error using ssh key: %v", err)
51 }
52
53 // We have an SSH key - decode it.
54 raw, err := base64.StdEncoding.DecodeString(sshKey)
55 if err != nil {
56 return err
57 }
58
59 // Create a temp file for the key and ensure it is removed.
60 fh, err := ioutil.TempFile("", "go-getter")
61 if err != nil {
62 return err
63 }
64 sshKeyFile = fh.Name()
65 defer os.Remove(sshKeyFile)
66
67 // Set the permissions prior to writing the key material.
68 if err := os.Chmod(sshKeyFile, 0600); err != nil {
69 return err
70 }
71
72 // Write the raw key into the temp file.
73 _, err = fh.Write(raw)
74 fh.Close()
75 if err != nil {
76 return err
77 }
78 }
79
80 // Clone or update the repository
81 _, err := os.Stat(dst)
82 if err != nil && !os.IsNotExist(err) {
83 return err
84 }
85 if err == nil {
86 err = g.update(dst, sshKeyFile, ref)
87 } else {
88 err = g.clone(dst, sshKeyFile, u)
89 }
90 if err != nil {
91 return err
92 }
93
94 // Next: check out the proper tag/branch if it is specified, and checkout
95 if ref != "" {
96 if err := g.checkout(dst, ref); err != nil {
97 return err
98 }
99 }
100
101 // Lastly, download any/all submodules.
102 return g.fetchSubmodules(dst, sshKeyFile)
103}
104
105// GetFile for Git doesn't support updating at this time. It will download
106// the file every time.
107func (g *GitGetter) GetFile(dst string, u *url.URL) error {
108 td, err := ioutil.TempDir("", "getter-git")
109 if err != nil {
110 return err
111 }
112 if err := os.RemoveAll(td); err != nil {
113 return err
114 }
115
116 // Get the filename, and strip the filename from the URL so we can
117 // just get the repository directly.
118 filename := filepath.Base(u.Path)
119 u.Path = filepath.Dir(u.Path)
120
121 // Get the full repository
122 if err := g.Get(td, u); err != nil {
123 return err
124 }
125
126 // Copy the single file
127 u, err = urlhelper.Parse(fmtFileURL(filepath.Join(td, filename)))
128 if err != nil {
129 return err
130 }
131
132 fg := &FileGetter{Copy: true}
133 return fg.GetFile(dst, u)
134}
135
136func (g *GitGetter) checkout(dst string, ref string) error {
137 cmd := exec.Command("git", "checkout", ref)
138 cmd.Dir = dst
139 return getRunCommand(cmd)
140}
141
142func (g *GitGetter) clone(dst, sshKeyFile string, u *url.URL) error {
143 cmd := exec.Command("git", "clone", u.String(), dst)
144 setupGitEnv(cmd, sshKeyFile)
145 return getRunCommand(cmd)
146}
147
148func (g *GitGetter) update(dst, sshKeyFile, ref string) error {
149 // Determine if the ref is a branch. If it is NOT a branch, then we
150 // just switch to master prior to checking out
151 cmd := exec.Command("git", "show-ref", "-q", "--verify", "refs/heads/"+ref)
152 cmd.Dir = dst
153
154 if getRunCommand(cmd) != nil {
155 // Not a branch, switch to master. This will also catch non-existent
156 // branches, in which case we want to switch to master and then
157 // checkout the proper branch later.
158 ref = "master"
159 }
160
161 // We have to be on a branch to pull
162 if err := g.checkout(dst, ref); err != nil {
163 return err
164 }
165
166 cmd = exec.Command("git", "pull", "--ff-only")
167 cmd.Dir = dst
168 setupGitEnv(cmd, sshKeyFile)
169 return getRunCommand(cmd)
170}
171
172// fetchSubmodules downloads any configured submodules recursively.
173func (g *GitGetter) fetchSubmodules(dst, sshKeyFile string) error {
174 cmd := exec.Command("git", "submodule", "update", "--init", "--recursive")
175 cmd.Dir = dst
176 setupGitEnv(cmd, sshKeyFile)
177 return getRunCommand(cmd)
178}
179
180// setupGitEnv sets up the environment for the given command. This is used to
181// pass configuration data to git and ssh and enables advanced cloning methods.
182func setupGitEnv(cmd *exec.Cmd, sshKeyFile string) {
183 var sshOpts []string
184
185 if sshKeyFile != "" {
186 // We have an SSH key temp file configured, tell ssh about this.
187 sshOpts = append(sshOpts, "-i", sshKeyFile)
188 }
189
190 cmd.Env = append(os.Environ(),
191 // Set the ssh command to use for clones.
192 "GIT_SSH_COMMAND=ssh "+strings.Join(sshOpts, " "),
193 )
194}
195
196// checkGitVersion is used to check the version of git installed on the system
197// against a known minimum version. Returns an error if the installed version
198// is older than the given minimum.
199func checkGitVersion(min string) error {
200 want, err := version.NewVersion(min)
201 if err != nil {
202 return err
203 }
204
205 out, err := exec.Command("git", "version").Output()
206 if err != nil {
207 return err
208 }
209
210 fields := strings.Fields(string(out))
211 if len(fields) != 3 {
212 return fmt.Errorf("Unexpected 'git version' output: %q", string(out))
213 }
214
215 have, err := version.NewVersion(fields[2])
216 if err != nil {
217 return err
218 }
219
220 if have.LessThan(want) {
221 return fmt.Errorf("Required git version = %s, have %s", want, have)
222 }
223
224 return nil
225}
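A sketch of passing the ref query parameter described above through the public API (the repository and ref are placeholders; sshkey, a base64-encoded private key, rides in the same way):

package main

import (
	"log"

	getter "github.com/hashicorp/go-getter"
)

func main() {
	// ref is stripped from the URL before cloning and checked out after;
	// it may name a branch, tag, or commit.
	src := "git::https://github.com/hashicorp/go-getter.git?ref=master"
	if err := getter.Get("./go-getter", src); err != nil {
		log.Fatal(err)
	}
}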
diff --git a/vendor/github.com/hashicorp/go-getter/get_hg.go b/vendor/github.com/hashicorp/go-getter/get_hg.go
new file mode 100644
index 0000000..820bdd4
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/get_hg.go
@@ -0,0 +1,131 @@
1package getter
2
3import (
4 "fmt"
5 "io/ioutil"
6 "net/url"
7 "os"
8 "os/exec"
9 "path/filepath"
10 "runtime"
11
12 urlhelper "github.com/hashicorp/go-getter/helper/url"
13)
14
15// HgGetter is a Getter implementation that will download a module from
16// a Mercurial repository.
17type HgGetter struct{}
18
19func (g *HgGetter) ClientMode(_ *url.URL) (ClientMode, error) {
20 return ClientModeDir, nil
21}
22
23func (g *HgGetter) Get(dst string, u *url.URL) error {
24 if _, err := exec.LookPath("hg"); err != nil {
25 return fmt.Errorf("hg must be available and on the PATH")
26 }
27
28 newURL, err := urlhelper.Parse(u.String())
29 if err != nil {
30 return err
31 }
32 if fixWindowsDrivePath(newURL) {
33 // See valid file path form on http://www.selenic.com/hg/help/urls
34 newURL.Path = fmt.Sprintf("/%s", newURL.Path)
35 }
36
37 // Extract some query parameters we use
38 var rev string
39 q := newURL.Query()
40 if len(q) > 0 {
41 rev = q.Get("rev")
42 q.Del("rev")
43
44 newURL.RawQuery = q.Encode()
45 }
46
47 _, err = os.Stat(dst)
48 if err != nil && !os.IsNotExist(err) {
49 return err
50 }
51 if err != nil {
52 if err := g.clone(dst, newURL); err != nil {
53 return err
54 }
55 }
56
57 if err := g.pull(dst, newURL); err != nil {
58 return err
59 }
60
61 return g.update(dst, newURL, rev)
62}
63
64// GetFile for Hg doesn't support updating at this time. It will download
65// the file every time.
66func (g *HgGetter) GetFile(dst string, u *url.URL) error {
67 td, err := ioutil.TempDir("", "getter-hg")
68 if err != nil {
69 return err
70 }
71 if err := os.RemoveAll(td); err != nil {
72 return err
73 }
74
75 // Get the filename, and strip the filename from the URL so we can
76 // just get the repository directly.
77 filename := filepath.Base(u.Path)
78 u.Path = filepath.ToSlash(filepath.Dir(u.Path))
79
80 // If we're on Windows, we need to set the host to "localhost" for hg
81 if runtime.GOOS == "windows" {
82 u.Host = "localhost"
83 }
84
85 // Get the full repository
86 if err := g.Get(td, u); err != nil {
87 return err
88 }
89
90 // Copy the single file
91 u, err = urlhelper.Parse(fmtFileURL(filepath.Join(td, filename)))
92 if err != nil {
93 return err
94 }
95
96 fg := &FileGetter{Copy: true}
97 return fg.GetFile(dst, u)
98}
99
100func (g *HgGetter) clone(dst string, u *url.URL) error {
101 cmd := exec.Command("hg", "clone", "-U", u.String(), dst)
102 return getRunCommand(cmd)
103}
104
105func (g *HgGetter) pull(dst string, u *url.URL) error {
106 cmd := exec.Command("hg", "pull")
107 cmd.Dir = dst
108 return getRunCommand(cmd)
109}
110
111func (g *HgGetter) update(dst string, u *url.URL, rev string) error {
112 args := []string{"update"}
113 if rev != "" {
114 args = append(args, rev)
115 }
116
117 cmd := exec.Command("hg", args...)
118 cmd.Dir = dst
119 return getRunCommand(cmd)
120}
121
122func fixWindowsDrivePath(u *url.URL) bool {
123 // hg assumes a file:/// prefix for Windows drive letter file paths.
124 // (e.g. file:///c:/foo/bar)
125 // If the URL Path does not begin with a '/' character, the resulting URL
126 // path will have a file:// prefix. (e.g. file://c:/foo/bar)
127 // See http://www.selenic.com/hg/help/urls and the examples listed in
128 // http://selenic.com/repo/hg-stable/file/1265a3a71d75/mercurial/util.py#l1936
129 return runtime.GOOS == "windows" && u.Scheme == "file" &&
130 len(u.Path) > 1 && u.Path[0] != '/' && u.Path[1] == ':'
131}
diff --git a/vendor/github.com/hashicorp/go-getter/get_http.go b/vendor/github.com/hashicorp/go-getter/get_http.go
new file mode 100644
index 0000000..3c02034
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/get_http.go
@@ -0,0 +1,219 @@
1package getter
2
3import (
4 "encoding/xml"
5 "fmt"
6 "io"
7 "io/ioutil"
8 "net/http"
9 "net/url"
10 "os"
11 "path/filepath"
12 "strings"
13)
14
15// HttpGetter is a Getter implementation that will download from an HTTP
16// endpoint.
17//
18// For file downloads, HTTP is used directly.
19//
20// The protocol for downloading a directory from an HTTP endpoint is as follows:
21//
22// An HTTP GET request is made to the URL with the additional GET parameter
23// "terraform-get=1". This lets you handle that scenario specially if you
24// wish. The response must be a 2xx.
25//
26// First, the response headers are checked for "X-Terraform-Get", which
27// should contain a source URL to download.
28//
29// If the header is not present, then the body is searched for a meta tag
30// named "terraform-get" whose content should be a source URL.
31//
32// The source URL, whether from the header or meta tag, must be a fully
33// formed URL. The shorthand syntax of "github.com/foo/bar" or relative
34// paths are not allowed.
35type HttpGetter struct {
36 // Netrc, if true, will lookup and use auth information found
37 // in the user's netrc file if available.
38 Netrc bool
39}
40
41func (g *HttpGetter) ClientMode(u *url.URL) (ClientMode, error) {
42 if strings.HasSuffix(u.Path, "/") {
43 return ClientModeDir, nil
44 }
45 return ClientModeFile, nil
46}
47
48func (g *HttpGetter) Get(dst string, u *url.URL) error {
49 // Copy the URL so we can modify it
50 var newU url.URL = *u
51 u = &newU
52
53 if g.Netrc {
54 // Add auth from netrc if we can
55 if err := addAuthFromNetrc(u); err != nil {
56 return err
57 }
58 }
59
60 // Add terraform-get to the parameter.
61 q := u.Query()
62 q.Add("terraform-get", "1")
63 u.RawQuery = q.Encode()
64
65 // Get the URL
66 resp, err := http.Get(u.String())
67 if err != nil {
68 return err
69 }
70 defer resp.Body.Close()
71 if resp.StatusCode < 200 || resp.StatusCode >= 300 {
72 return fmt.Errorf("bad response code: %d", resp.StatusCode)
73 }
74
75 // Extract the source URL
76 var source string
77 if v := resp.Header.Get("X-Terraform-Get"); v != "" {
78 source = v
79 } else {
80 source, err = g.parseMeta(resp.Body)
81 if err != nil {
82 return err
83 }
84 }
85 if source == "" {
86 return fmt.Errorf("no source URL was returned")
87 }
88
89 // If there is a subdir component, then we download the root separately
90 // into a temporary directory, then copy over the proper subdir.
91 source, subDir := SourceDirSubdir(source)
92 if subDir == "" {
93 return Get(dst, source)
94 }
95
96 // We have a subdir, time to jump some hoops
97 return g.getSubdir(dst, source, subDir)
98}
99
100func (g *HttpGetter) GetFile(dst string, u *url.URL) error {
101 resp, err := http.Get(u.String())
102 if err != nil {
103 return err
104 }
105 defer resp.Body.Close()
106 if resp.StatusCode != 200 {
107 return fmt.Errorf("bad response code: %d", resp.StatusCode)
108 }
109
110 // Create all the parent directories
111 if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil {
112 return err
113 }
114
115 f, err := os.Create(dst)
116 if err != nil {
117 return err
118 }
119 defer f.Close()
120
121 _, err = io.Copy(f, resp.Body)
122 return err
123}
124
125// getSubdir downloads the source into the destination, but with
126// the proper subdir.
127func (g *HttpGetter) getSubdir(dst, source, subDir string) error {
128 // Create a temporary directory to store the full source
129 td, err := ioutil.TempDir("", "tf")
130 if err != nil {
131 return err
132 }
133 defer os.RemoveAll(td)
134
135 // Download that into the given directory
136 if err := Get(td, source); err != nil {
137 return err
138 }
139
140 // Make sure the subdir path actually exists
141 sourcePath := filepath.Join(td, subDir)
142 if _, err := os.Stat(sourcePath); err != nil {
143 return fmt.Errorf(
144 "Error downloading %s: %s", source, err)
145 }
146
147 // Copy the subdirectory into our actual destination.
148 if err := os.RemoveAll(dst); err != nil {
149 return err
150 }
151
152 // Make the final destination
153 if err := os.MkdirAll(dst, 0755); err != nil {
154 return err
155 }
156
157 return copyDir(dst, sourcePath, false)
158}
159
160// parseMeta looks for the first meta tag in the given reader that
161// will give us the source URL.
162func (g *HttpGetter) parseMeta(r io.Reader) (string, error) {
163 d := xml.NewDecoder(r)
164 d.CharsetReader = charsetReader
165 d.Strict = false
166 var err error
167 var t xml.Token
168 for {
169 t, err = d.Token()
170 if err != nil {
171 if err == io.EOF {
172 err = nil
173 }
174 return "", err
175 }
176 if e, ok := t.(xml.StartElement); ok && strings.EqualFold(e.Name.Local, "body") {
177 return "", nil
178 }
179 if e, ok := t.(xml.EndElement); ok && strings.EqualFold(e.Name.Local, "head") {
180 return "", nil
181 }
182 e, ok := t.(xml.StartElement)
183 if !ok || !strings.EqualFold(e.Name.Local, "meta") {
184 continue
185 }
186 if attrValue(e.Attr, "name") != "terraform-get" {
187 continue
188 }
189 if f := attrValue(e.Attr, "content"); f != "" {
190 return f, nil
191 }
192 }
193}
194
195// attrValue returns the attribute value for the case-insensitive key
196// `name', or the empty string if nothing is found.
197func attrValue(attrs []xml.Attr, name string) string {
198 for _, a := range attrs {
199 if strings.EqualFold(a.Name.Local, name) {
200 return a.Value
201 }
202 }
203 return ""
204}
205
206// charsetReader returns a reader for the given charset. Currently
207// it only supports UTF-8 and ASCII. Otherwise, it returns a meaningful
208// error which is printed by go get, so the user can find why the package
209// wasn't downloaded if the encoding is not supported. Note that, in
210// order to reduce potential errors, ASCII is treated as UTF-8 (i.e. characters
211// greater than 0x7f are not rejected).
212func charsetReader(charset string, input io.Reader) (io.Reader, error) {
213 switch strings.ToLower(charset) {
214 case "ascii":
215 return input, nil
216 default:
217 return nil, fmt.Errorf("can't decode XML document using charset %q", charset)
218 }
219}
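To make the directory protocol above concrete, a sketch of a server answering the terraform-get probe via the X-Terraform-Get header (the module URL it hands back is a placeholder):

package main

import (
	"log"
	"net/http"
)

func main() {
	http.HandleFunc("/modules/vpc", func(w http.ResponseWriter, r *http.Request) {
		// go-getter appends ?terraform-get=1 when asking for a directory.
		if r.URL.Query().Get("terraform-get") == "1" {
			w.Header().Set("X-Terraform-Get",
				"git::https://github.com/example/modules.git//vpc")
			return
		}
		http.NotFound(w, r)
	})
	log.Fatal(http.ListenAndServe(":8080", nil))
}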
diff --git a/vendor/github.com/hashicorp/go-getter/get_mock.go b/vendor/github.com/hashicorp/go-getter/get_mock.go
new file mode 100644
index 0000000..882e694
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/get_mock.go
@@ -0,0 +1,52 @@
1package getter
2
3import (
4 "net/url"
5)
6
7// MockGetter is an implementation of Getter that can be used for tests.
8type MockGetter struct {
9 // Proxy, if set, will be called after recording the calls below.
10 // If it isn't set, then the *Err values will be returned.
11 Proxy Getter
12
13 GetCalled bool
14 GetDst string
15 GetURL *url.URL
16 GetErr error
17
18 GetFileCalled bool
19 GetFileDst string
20 GetFileURL *url.URL
21 GetFileErr error
22}
23
24func (g *MockGetter) Get(dst string, u *url.URL) error {
25 g.GetCalled = true
26 g.GetDst = dst
27 g.GetURL = u
28
29 if g.Proxy != nil {
30 return g.Proxy.Get(dst, u)
31 }
32
33 return g.GetErr
34}
35
36func (g *MockGetter) GetFile(dst string, u *url.URL) error {
37 g.GetFileCalled = true
38 g.GetFileDst = dst
39 g.GetFileURL = u
40
41 if g.Proxy != nil {
42 return g.Proxy.GetFile(dst, u)
43 }
44 return g.GetFileErr
45}
46
47func (g *MockGetter) ClientMode(u *url.URL) (ClientMode, error) {
48 if l := len(u.Path); l > 0 && u.Path[l-1:] == "/" {
49 return ClientModeDir, nil
50 }
51 return ClientModeFile, nil
52}
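A sketch of MockGetter in a test: the mock records the call and returns the configured error (nil here), so code under test can be exercised without network access:

package getter_test

import (
	"net/url"
	"testing"

	getter "github.com/hashicorp/go-getter"
)

func TestWithMockGetter(t *testing.T) {
	g := new(getter.MockGetter)

	u, err := url.Parse("mock://example.com/foo")
	if err != nil {
		t.Fatal(err)
	}

	// Code under test would receive g as its Getter; call it directly here.
	if err := g.Get("/tmp/dst", u); err != nil {
		t.Fatal(err)
	}

	if !g.GetCalled || g.GetDst != "/tmp/dst" {
		t.Fatalf("unexpected call state: %v %q", g.GetCalled, g.GetDst)
	}
}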
diff --git a/vendor/github.com/hashicorp/go-getter/get_s3.go b/vendor/github.com/hashicorp/go-getter/get_s3.go
new file mode 100644
index 0000000..d3bffeb
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/get_s3.go
@@ -0,0 +1,243 @@
1package getter
2
3import (
4 "fmt"
5 "io"
6 "net/url"
7 "os"
8 "path/filepath"
9 "strings"
10
11 "github.com/aws/aws-sdk-go/aws"
12 "github.com/aws/aws-sdk-go/aws/credentials"
13 "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
14 "github.com/aws/aws-sdk-go/aws/ec2metadata"
15 "github.com/aws/aws-sdk-go/aws/session"
16 "github.com/aws/aws-sdk-go/service/s3"
17)
18
19// S3Getter is a Getter implementation that will download a module from
20// a S3 bucket.
21type S3Getter struct{}
22
23func (g *S3Getter) ClientMode(u *url.URL) (ClientMode, error) {
24 // Parse URL
25 region, bucket, path, _, creds, err := g.parseUrl(u)
26 if err != nil {
27 return 0, err
28 }
29
30 // Create client config
31 config := g.getAWSConfig(region, creds)
32 sess := session.New(config)
33 client := s3.New(sess)
34
35 // List the object(s) at the given prefix
36 req := &s3.ListObjectsInput{
37 Bucket: aws.String(bucket),
38 Prefix: aws.String(path),
39 }
40 resp, err := client.ListObjects(req)
41 if err != nil {
42 return 0, err
43 }
44
45 for _, o := range resp.Contents {
46 // Use file mode on exact match.
47 if *o.Key == path {
48 return ClientModeFile, nil
49 }
50
51 // Use dir mode if child keys are found.
52 if strings.HasPrefix(*o.Key, path+"/") {
53 return ClientModeDir, nil
54 }
55 }
56
57 // There was no match, so just return file mode. The download is going
58 // to fail but we will let S3 return the proper error later.
59 return ClientModeFile, nil
60}
61
62func (g *S3Getter) Get(dst string, u *url.URL) error {
63 // Parse URL
64 region, bucket, path, _, creds, err := g.parseUrl(u)
65 if err != nil {
66 return err
67 }
68
69 // Remove destination if it already exists
70 _, err = os.Stat(dst)
71 if err != nil && !os.IsNotExist(err) {
72 return err
73 }
74
75 if err == nil {
76 // Remove the destination
77 if err := os.RemoveAll(dst); err != nil {
78 return err
79 }
80 }
81
82 // Create all the parent directories
83 if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil {
84 return err
85 }
86
87 config := g.getAWSConfig(region, creds)
88 sess := session.New(config)
89 client := s3.New(sess)
90
91 // List files in path, keep listing until no more objects are found
92 lastMarker := ""
93 hasMore := true
94 for hasMore {
95 req := &s3.ListObjectsInput{
96 Bucket: aws.String(bucket),
97 Prefix: aws.String(path),
98 }
99 if lastMarker != "" {
100 req.Marker = aws.String(lastMarker)
101 }
102
103 resp, err := client.ListObjects(req)
104 if err != nil {
105 return err
106 }
107
108 hasMore = aws.BoolValue(resp.IsTruncated)
109
110 // Get each object storing each file relative to the destination path
111 for _, object := range resp.Contents {
112 lastMarker = aws.StringValue(object.Key)
113 objPath := aws.StringValue(object.Key)
114
115 // If the key ends with a slash, assume it is a directory and skip it
116 if strings.HasSuffix(objPath, "/") {
117 continue
118 }
119
120 // Get the object destination path
121 objDst, err := filepath.Rel(path, objPath)
122 if err != nil {
123 return err
124 }
125 objDst = filepath.Join(dst, objDst)
126
127 if err := g.getObject(client, objDst, bucket, objPath, ""); err != nil {
128 return err
129 }
130 }
131 }
132
133 return nil
134}
135
136func (g *S3Getter) GetFile(dst string, u *url.URL) error {
137 region, bucket, path, version, creds, err := g.parseUrl(u)
138 if err != nil {
139 return err
140 }
141
142 config := g.getAWSConfig(region, creds)
143 sess := session.New(config)
144 client := s3.New(sess)
145 return g.getObject(client, dst, bucket, path, version)
146}
147
148func (g *S3Getter) getObject(client *s3.S3, dst, bucket, key, version string) error {
149 req := &s3.GetObjectInput{
150 Bucket: aws.String(bucket),
151 Key: aws.String(key),
152 }
153 if version != "" {
154 req.VersionId = aws.String(version)
155 }
156
157 resp, err := client.GetObject(req)
158 if err != nil {
159 return err
160 }
161
162 // Create all the parent directories
163 if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil {
164 return err
165 }
166
167 f, err := os.Create(dst)
168 if err != nil {
169 return err
170 }
171 defer f.Close()
172
173 _, err = io.Copy(f, resp.Body)
174 return err
175}
176
177func (g *S3Getter) getAWSConfig(region string, creds *credentials.Credentials) *aws.Config {
178 conf := &aws.Config{}
179 if creds == nil {
180 // Grab the metadata URL
181 metadataURL := os.Getenv("AWS_METADATA_URL")
182 if metadataURL == "" {
183 metadataURL = "http://169.254.169.254:80/latest"
184 }
185
186 creds = credentials.NewChainCredentials(
187 []credentials.Provider{
188 &credentials.EnvProvider{},
189 &credentials.SharedCredentialsProvider{Filename: "", Profile: ""},
190 &ec2rolecreds.EC2RoleProvider{
191 Client: ec2metadata.New(session.New(&aws.Config{
192 Endpoint: aws.String(metadataURL),
193 })),
194 },
195 })
196 }
197
198 conf.Credentials = creds
199 if region != "" {
200 conf.Region = aws.String(region)
201 }
202
203 return conf
204}
205
206func (g *S3Getter) parseUrl(u *url.URL) (region, bucket, path, version string, creds *credentials.Credentials, err error) {
207 // Expected host style: s3.amazonaws.com. They always have 3 parts,
208 // although the first may differ if we're accessing a specific region.
209 hostParts := strings.Split(u.Host, ".")
210 if len(hostParts) != 3 {
211 err = fmt.Errorf("URL is not a valid S3 URL")
212 return
213 }
214
215 // Parse the region out of the first part of the host
216 region = strings.TrimPrefix(strings.TrimPrefix(hostParts[0], "s3-"), "s3")
217 if region == "" {
218 region = "us-east-1"
219 }
220
221 pathParts := strings.SplitN(u.Path, "/", 3)
222 if len(pathParts) != 3 {
223 err = fmt.Errorf("URL is not a valid S3 URL")
224 return
225 }
226
227 bucket = pathParts[1]
228 path = pathParts[2]
229 version = u.Query().Get("version")
230
231 _, hasAwsId := u.Query()["aws_access_key_id"]
232 _, hasAwsSecret := u.Query()["aws_access_key_secret"]
233 _, hasAwsToken := u.Query()["aws_access_token"]
234 if hasAwsId || hasAwsSecret || hasAwsToken {
235 creds = credentials.NewStaticCredentials(
236 u.Query().Get("aws_access_key_id"),
237 u.Query().Get("aws_access_key_secret"),
238 u.Query().Get("aws_access_token"),
239 )
240 }
241
242 return
243}
diff --git a/vendor/github.com/hashicorp/go-getter/helper/url/url.go b/vendor/github.com/hashicorp/go-getter/helper/url/url.go
new file mode 100644
index 0000000..02497c2
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/helper/url/url.go
@@ -0,0 +1,14 @@
1package url
2
3import (
4 "net/url"
5)
6
7// Parse parses rawURL into a URL structure.
8// The rawURL may be relative or absolute.
9//
10// Parse is a wrapper for the Go stdlib net/url Parse function, but returns
11// Windows "safe" URLs on Windows platforms.
12func Parse(rawURL string) (*url.URL, error) {
13 return parse(rawURL)
14}
diff --git a/vendor/github.com/hashicorp/go-getter/helper/url/url_unix.go b/vendor/github.com/hashicorp/go-getter/helper/url/url_unix.go
new file mode 100644
index 0000000..ed1352a
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/helper/url/url_unix.go
@@ -0,0 +1,11 @@
1// +build !windows
2
3package url
4
5import (
6 "net/url"
7)
8
9func parse(rawURL string) (*url.URL, error) {
10 return url.Parse(rawURL)
11}
diff --git a/vendor/github.com/hashicorp/go-getter/helper/url/url_windows.go b/vendor/github.com/hashicorp/go-getter/helper/url/url_windows.go
new file mode 100644
index 0000000..4655226
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/helper/url/url_windows.go
@@ -0,0 +1,40 @@
1package url
2
3import (
4 "fmt"
5 "net/url"
6 "path/filepath"
7 "strings"
8)
9
10func parse(rawURL string) (*url.URL, error) {
11 // Make sure we're using "/" since URLs are "/"-based.
12 rawURL = filepath.ToSlash(rawURL)
13
14 u, err := url.Parse(rawURL)
15 if err != nil {
16 return nil, err
17 }
18
19 if len(rawURL) > 1 && rawURL[1] == ':' {
20 // Assume we're dealing with a drive letter file path where the drive
21 // letter has been parsed into the URL Scheme, and the rest of the path
22 // has been parsed into the URL Path without the leading ':' character.
23 u.Path = fmt.Sprintf("%s:%s", string(rawURL[0]), u.Path)
24 u.Scheme = ""
25 }
26
27 if len(u.Host) > 1 && u.Host[1] == ':' && strings.HasPrefix(rawURL, "file://") {
28 // Assume we're dealing with a drive letter file path where the drive
29 // letter has been parsed into the URL Host.
30 u.Path = fmt.Sprintf("%s%s", u.Host, u.Path)
31 u.Host = ""
32 }
33
34 // Remove leading slash for absolute file paths.
35 if len(u.Path) > 2 && u.Path[0] == '/' && u.Path[2] == ':' {
36 u.Path = u.Path[1:]
37 }
38
39 return u, err
40}
diff --git a/vendor/github.com/hashicorp/go-getter/netrc.go b/vendor/github.com/hashicorp/go-getter/netrc.go
new file mode 100644
index 0000000..c7f6a3f
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/netrc.go
@@ -0,0 +1,67 @@
1package getter
2
3import (
4 "fmt"
5 "net/url"
6 "os"
7 "runtime"
8
9 "github.com/bgentry/go-netrc/netrc"
10 "github.com/mitchellh/go-homedir"
11)
12
13// addAuthFromNetrc adds auth information to the URL from the user's
14// netrc file if it can be found. This will only add the auth info
15// if the URL doesn't already have auth info with a non-empty
16// username specified.
17func addAuthFromNetrc(u *url.URL) error {
18 // If the URL already has auth information, do nothing
19 if u.User != nil && u.User.Username() != "" {
20 return nil
21 }
22
23 // Get the netrc file path
24 path := os.Getenv("NETRC")
25 if path == "" {
26 filename := ".netrc"
27 if runtime.GOOS == "windows" {
28 filename = "_netrc"
29 }
30
31 var err error
32 path, err = homedir.Expand("~/" + filename)
33 if err != nil {
34 return err
35 }
36 }
37
38 // If the path doesn't exist or isn't a regular file, do nothing
39 if fi, err := os.Stat(path); err != nil {
40 // File doesn't exist, do nothing
41 if os.IsNotExist(err) {
42 return nil
43 }
44
45 // Some other error!
46 return err
47 } else if fi.IsDir() {
48 // File is directory, ignore
49 return nil
50 }
51
52 // Load up the netrc file
53 net, err := netrc.ParseFile(path)
54 if err != nil {
55 return fmt.Errorf("Error parsing netrc file at %q: %s", path, err)
56 }
57
58 machine := net.FindMachine(u.Host)
59 if machine == nil {
60 // Machine not found, no problem
61 return nil
62 }
63
64 // Set the user info
65 u.User = url.UserPassword(machine.Login, machine.Password)
66 return nil
67}
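
Since `addAuthFromNetrc` is unexported, a usage sketch has to live inside package `getter`; the machine entry and credentials below are illustrative assumptions:

```go
package getter

import (
	"fmt"
	"net/url"
	"testing"
)

// Sketch of the behavior above as an in-package test. Assumes a netrc
// file reachable via the NETRC environment variable (or ~/.netrc,
// %USERPROFILE%\_netrc on Windows) containing an entry such as:
//
//	machine example.com
//	  login deploy
//	  password s3cr3t
func TestAddAuthFromNetrcSketch(t *testing.T) {
	u, err := url.Parse("https://example.com/modules/app.zip")
	if err != nil {
		t.Fatal(err)
	}
	if err := addAuthFromNetrc(u); err != nil {
		t.Fatal(err)
	}
	// With the entry above, user info is filled in from the netrc file.
	fmt.Println(u.String()) // https://deploy:s3cr3t@example.com/modules/app.zip
}
```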
diff --git a/vendor/github.com/hashicorp/go-getter/source.go b/vendor/github.com/hashicorp/go-getter/source.go
new file mode 100644
index 0000000..4d5ee3c
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/source.go
@@ -0,0 +1,36 @@
1package getter
2
3import (
4 "strings"
5)
6
7// SourceDirSubdir takes a source and returns a tuple of the URL without
8// the subdir and the subdir itself.
9func SourceDirSubdir(src string) (string, string) {
10 // Calculate an offset to avoid accidentally marking the scheme
11 // as the dir.
12 var offset int
13 if idx := strings.Index(src, "://"); idx > -1 {
14 offset = idx + 3
15 }
16
17 // First see if we even have an explicit subdir
18 idx := strings.Index(src[offset:], "//")
19 if idx == -1 {
20 return src, ""
21 }
22
23 idx += offset
24 subdir := src[idx+2:]
25 src = src[:idx]
26
27 // Next, check if we have query parameters and push them onto the
28 // URL.
29 if idx = strings.Index(subdir, "?"); idx > -1 {
30 query := subdir[idx:]
31 subdir = subdir[:idx]
32 src += query
33 }
34
35 return src, subdir
36}
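
A minimal sketch of `SourceDirSubdir` in action. The source strings are illustrative; the expected behavior (subdir split on `//`, query parameters pushed back onto the source) follows the code above:

```go
package main

import (
	"fmt"

	getter "github.com/hashicorp/go-getter"
)

func main() {
	for _, src := range []string{
		"github.com/hashicorp/consul//website",
		"git::https://example.com/repo.git//modules/vpc?ref=v1.2.0",
		"./local/path", // no explicit subdir
	} {
		dir, sub := getter.SourceDirSubdir(src)
		fmt.Printf("%-58s -> dir=%q subdir=%q\n", src, dir, sub)
	}
	// The second case yields:
	//   dir="git::https://example.com/repo.git?ref=v1.2.0" subdir="modules/vpc"
}
```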
diff --git a/vendor/github.com/hashicorp/go-getter/storage.go b/vendor/github.com/hashicorp/go-getter/storage.go
new file mode 100644
index 0000000..2bc6b9e
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/storage.go
@@ -0,0 +1,13 @@
1package getter
2
3// Storage is an interface that knows how to lookup downloaded directories
4// as well as download and update directories from their sources into the
5// proper location.
6type Storage interface {
7 // Dir returns the directory on local disk where the directory source
8 // can be loaded from.
9 Dir(string) (string, bool, error)
10
11 // Get will download and optionally update the given directory.
12 Get(string, string, bool) error
13}
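
A minimal sketch of what a `Storage` implementation might look like, mapping each key to a folder and delegating downloads to the package-level `Get`. The type name and layout are illustrative assumptions, not the library's own implementation; a real implementation should sanitize or hash the key before using it as a directory name:

```go
package getter

import (
	"os"
	"path/filepath"
)

// folderStorageSketch stores each source under StorageDir, keyed by name.
type folderStorageSketch struct {
	StorageDir string
}

func (s *folderStorageSketch) Dir(key string) (string, bool, error) {
	d := filepath.Join(s.StorageDir, key)
	if _, err := os.Stat(d); err != nil {
		if os.IsNotExist(err) {
			return "", false, nil // not downloaded yet
		}
		return "", false, err
	}
	return d, true, nil
}

func (s *folderStorageSketch) Get(key string, source string, update bool) error {
	dir := filepath.Join(s.StorageDir, key)
	if !update {
		if _, ok, err := s.Dir(key); err != nil {
			return err
		} else if ok {
			return nil // already present and no update requested
		}
	}
	// Delegate the actual download to the package-level Get.
	return Get(dir, source)
}
```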
diff --git a/vendor/github.com/hashicorp/go-multierror/LICENSE b/vendor/github.com/hashicorp/go-multierror/LICENSE
new file mode 100644
index 0000000..82b4de9
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-multierror/LICENSE
@@ -0,0 +1,353 @@
1Mozilla Public License, version 2.0
2
31. Definitions
4
51.1. “Contributor”
6
7 means each individual or legal entity that creates, contributes to the
8 creation of, or owns Covered Software.
9
101.2. “Contributor Version”
11
12 means the combination of the Contributions of others (if any) used by a
13 Contributor and that particular Contributor’s Contribution.
14
151.3. “Contribution”
16
17 means Covered Software of a particular Contributor.
18
191.4. “Covered Software”
20
21 means Source Code Form to which the initial Contributor has attached the
22 notice in Exhibit A, the Executable Form of such Source Code Form, and
23 Modifications of such Source Code Form, in each case including portions
24 thereof.
25
261.5. “Incompatible With Secondary Licenses”
27 means
28
29 a. that the initial Contributor has attached the notice described in
30 Exhibit B to the Covered Software; or
31
32 b. that the Covered Software was made available under the terms of version
33 1.1 or earlier of the License, but not also under the terms of a
34 Secondary License.
35
361.6. “Executable Form”
37
38 means any form of the work other than Source Code Form.
39
401.7. “Larger Work”
41
42 means a work that combines Covered Software with other material, in a separate
43 file or files, that is not Covered Software.
44
451.8. “License”
46
47 means this document.
48
491.9. “Licensable”
50
51 means having the right to grant, to the maximum extent possible, whether at the
52 time of the initial grant or subsequently, any and all of the rights conveyed by
53 this License.
54
551.10. “Modifications”
56
57 means any of the following:
58
59 a. any file in Source Code Form that results from an addition to, deletion
60 from, or modification of the contents of Covered Software; or
61
62 b. any new file in Source Code Form that contains any Covered Software.
63
641.11. “Patent Claims” of a Contributor
65
66 means any patent claim(s), including without limitation, method, process,
67 and apparatus claims, in any patent Licensable by such Contributor that
68 would be infringed, but for the grant of the License, by the making,
69 using, selling, offering for sale, having made, import, or transfer of
70 either its Contributions or its Contributor Version.
71
721.12. “Secondary License”
73
74 means either the GNU General Public License, Version 2.0, the GNU Lesser
75 General Public License, Version 2.1, the GNU Affero General Public
76 License, Version 3.0, or any later versions of those licenses.
77
781.13. “Source Code Form”
79
80 means the form of the work preferred for making modifications.
81
821.14. “You” (or “Your”)
83
84 means an individual or a legal entity exercising rights under this
85 License. For legal entities, “You” includes any entity that controls, is
86 controlled by, or is under common control with You. For purposes of this
87 definition, “control” means (a) the power, direct or indirect, to cause
88 the direction or management of such entity, whether by contract or
89 otherwise, or (b) ownership of more than fifty percent (50%) of the
90 outstanding shares or beneficial ownership of such entity.
91
92
932. License Grants and Conditions
94
952.1. Grants
96
97 Each Contributor hereby grants You a world-wide, royalty-free,
98 non-exclusive license:
99
100 a. under intellectual property rights (other than patent or trademark)
101 Licensable by such Contributor to use, reproduce, make available,
102 modify, display, perform, distribute, and otherwise exploit its
103 Contributions, either on an unmodified basis, with Modifications, or as
104 part of a Larger Work; and
105
106 b. under Patent Claims of such Contributor to make, use, sell, offer for
107 sale, have made, import, and otherwise transfer either its Contributions
108 or its Contributor Version.
109
1102.2. Effective Date
111
112 The licenses granted in Section 2.1 with respect to any Contribution become
113 effective for each Contribution on the date the Contributor first distributes
114 such Contribution.
115
1162.3. Limitations on Grant Scope
117
118 The licenses granted in this Section 2 are the only rights granted under this
119 License. No additional rights or licenses will be implied from the distribution
120 or licensing of Covered Software under this License. Notwithstanding Section
121 2.1(b) above, no patent license is granted by a Contributor:
122
123 a. for any code that a Contributor has removed from Covered Software; or
124
125 b. for infringements caused by: (i) Your and any other third party’s
126 modifications of Covered Software, or (ii) the combination of its
127 Contributions with other software (except as part of its Contributor
128 Version); or
129
130 c. under Patent Claims infringed by Covered Software in the absence of its
131 Contributions.
132
133 This License does not grant any rights in the trademarks, service marks, or
134 logos of any Contributor (except as may be necessary to comply with the
135 notice requirements in Section 3.4).
136
1372.4. Subsequent Licenses
138
139 No Contributor makes additional grants as a result of Your choice to
140 distribute the Covered Software under a subsequent version of this License
141 (see Section 10.2) or under the terms of a Secondary License (if permitted
142 under the terms of Section 3.3).
143
1442.5. Representation
145
146 Each Contributor represents that the Contributor believes its Contributions
147 are its original creation(s) or it has sufficient rights to grant the
148 rights to its Contributions conveyed by this License.
149
1502.6. Fair Use
151
152 This License is not intended to limit any rights You have under applicable
153 copyright doctrines of fair use, fair dealing, or other equivalents.
154
1552.7. Conditions
156
157 Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
158 Section 2.1.
159
160
1613. Responsibilities
162
1633.1. Distribution of Source Form
164
165 All distribution of Covered Software in Source Code Form, including any
166 Modifications that You create or to which You contribute, must be under the
167 terms of this License. You must inform recipients that the Source Code Form
168 of the Covered Software is governed by the terms of this License, and how
169 they can obtain a copy of this License. You may not attempt to alter or
170 restrict the recipients’ rights in the Source Code Form.
171
1723.2. Distribution of Executable Form
173
174 If You distribute Covered Software in Executable Form then:
175
176 a. such Covered Software must also be made available in Source Code Form,
177 as described in Section 3.1, and You must inform recipients of the
178 Executable Form how they can obtain a copy of such Source Code Form by
179 reasonable means in a timely manner, at a charge no more than the cost
180 of distribution to the recipient; and
181
182 b. You may distribute such Executable Form under the terms of this License,
183 or sublicense it under different terms, provided that the license for
184 the Executable Form does not attempt to limit or alter the recipients’
185 rights in the Source Code Form under this License.
186
1873.3. Distribution of a Larger Work
188
189 You may create and distribute a Larger Work under terms of Your choice,
190 provided that You also comply with the requirements of this License for the
191 Covered Software. If the Larger Work is a combination of Covered Software
192 with a work governed by one or more Secondary Licenses, and the Covered
193 Software is not Incompatible With Secondary Licenses, this License permits
194 You to additionally distribute such Covered Software under the terms of
195 such Secondary License(s), so that the recipient of the Larger Work may, at
196 their option, further distribute the Covered Software under the terms of
197 either this License or such Secondary License(s).
198
1993.4. Notices
200
201 You may not remove or alter the substance of any license notices (including
202 copyright notices, patent notices, disclaimers of warranty, or limitations
203 of liability) contained within the Source Code Form of the Covered
204 Software, except that You may alter any license notices to the extent
205 required to remedy known factual inaccuracies.
206
2073.5. Application of Additional Terms
208
209 You may choose to offer, and to charge a fee for, warranty, support,
210 indemnity or liability obligations to one or more recipients of Covered
211 Software. However, You may do so only on Your own behalf, and not on behalf
212 of any Contributor. You must make it absolutely clear that any such
213 warranty, support, indemnity, or liability obligation is offered by You
214 alone, and You hereby agree to indemnify every Contributor for any
215 liability incurred by such Contributor as a result of warranty, support,
216 indemnity or liability terms You offer. You may include additional
217 disclaimers of warranty and limitations of liability specific to any
218 jurisdiction.
219
2204. Inability to Comply Due to Statute or Regulation
221
222 If it is impossible for You to comply with any of the terms of this License
223 with respect to some or all of the Covered Software due to statute, judicial
224 order, or regulation then You must: (a) comply with the terms of this License
225 to the maximum extent possible; and (b) describe the limitations and the code
226 they affect. Such description must be placed in a text file included with all
227 distributions of the Covered Software under this License. Except to the
228 extent prohibited by statute or regulation, such description must be
229 sufficiently detailed for a recipient of ordinary skill to be able to
230 understand it.
231
2325. Termination
233
2345.1. The rights granted under this License will terminate automatically if You
235 fail to comply with any of its terms. However, if You become compliant,
236 then the rights granted under this License from a particular Contributor
237 are reinstated (a) provisionally, unless and until such Contributor
238 explicitly and finally terminates Your grants, and (b) on an ongoing basis,
239 if such Contributor fails to notify You of the non-compliance by some
240 reasonable means prior to 60 days after You have come back into compliance.
241 Moreover, Your grants from a particular Contributor are reinstated on an
242 ongoing basis if such Contributor notifies You of the non-compliance by
243 some reasonable means, this is the first time You have received notice of
244 non-compliance with this License from such Contributor, and You become
245 compliant prior to 30 days after Your receipt of the notice.
246
2475.2. If You initiate litigation against any entity by asserting a patent
248 infringement claim (excluding declaratory judgment actions, counter-claims,
249 and cross-claims) alleging that a Contributor Version directly or
250 indirectly infringes any patent, then the rights granted to You by any and
251 all Contributors for the Covered Software under Section 2.1 of this License
252 shall terminate.
253
2545.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
255 license agreements (excluding distributors and resellers) which have been
256 validly granted by You or Your distributors under this License prior to
257 termination shall survive termination.
258
2596. Disclaimer of Warranty
260
261 Covered Software is provided under this License on an “as is” basis, without
262 warranty of any kind, either expressed, implied, or statutory, including,
263 without limitation, warranties that the Covered Software is free of defects,
264 merchantable, fit for a particular purpose or non-infringing. The entire
265 risk as to the quality and performance of the Covered Software is with You.
266 Should any Covered Software prove defective in any respect, You (not any
267 Contributor) assume the cost of any necessary servicing, repair, or
268 correction. This disclaimer of warranty constitutes an essential part of this
269 License. No use of any Covered Software is authorized under this License
270 except under this disclaimer.
271
2727. Limitation of Liability
273
274 Under no circumstances and under no legal theory, whether tort (including
275 negligence), contract, or otherwise, shall any Contributor, or anyone who
276 distributes Covered Software as permitted above, be liable to You for any
277 direct, indirect, special, incidental, or consequential damages of any
278 character including, without limitation, damages for lost profits, loss of
279 goodwill, work stoppage, computer failure or malfunction, or any and all
280 other commercial damages or losses, even if such party shall have been
281 informed of the possibility of such damages. This limitation of liability
282 shall not apply to liability for death or personal injury resulting from such
283 party’s negligence to the extent applicable law prohibits such limitation.
284 Some jurisdictions do not allow the exclusion or limitation of incidental or
285 consequential damages, so this exclusion and limitation may not apply to You.
286
2878. Litigation
288
289 Any litigation relating to this License may be brought only in the courts of
290 a jurisdiction where the defendant maintains its principal place of business
291 and such litigation shall be governed by laws of that jurisdiction, without
292 reference to its conflict-of-law provisions. Nothing in this Section shall
293 prevent a party’s ability to bring cross-claims or counter-claims.
294
2959. Miscellaneous
296
297 This License represents the complete agreement concerning the subject matter
298 hereof. If any provision of this License is held to be unenforceable, such
299 provision shall be reformed only to the extent necessary to make it
300 enforceable. Any law or regulation which provides that the language of a
301 contract shall be construed against the drafter shall not be used to construe
302 this License against a Contributor.
303
304
30510. Versions of the License
306
30710.1. New Versions
308
309 Mozilla Foundation is the license steward. Except as provided in Section
310 10.3, no one other than the license steward has the right to modify or
311 publish new versions of this License. Each version will be given a
312 distinguishing version number.
313
31410.2. Effect of New Versions
315
316 You may distribute the Covered Software under the terms of the version of
317 the License under which You originally received the Covered Software, or
318 under the terms of any subsequent version published by the license
319 steward.
320
32110.3. Modified Versions
322
323 If you create software not governed by this License, and you want to
324 create a new license for such software, you may create and use a modified
325 version of this License if you rename the license and remove any
326 references to the name of the license steward (except to note that such
327 modified license differs from this License).
328
32910.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
330 If You choose to distribute Source Code Form that is Incompatible With
331 Secondary Licenses under the terms of this version of the License, the
332 notice described in Exhibit B of this License must be attached.
333
334Exhibit A - Source Code Form License Notice
335
336 This Source Code Form is subject to the
337 terms of the Mozilla Public License, v.
338 2.0. If a copy of the MPL was not
339 distributed with this file, You can
340 obtain one at
341 http://mozilla.org/MPL/2.0/.
342
343If it is not possible or desirable to put the notice in a particular file, then
344You may include the notice in a location (such as a LICENSE file in a relevant
345directory) where a recipient would be likely to look for such a notice.
346
347You may add additional accurate notices of copyright ownership.
348
349Exhibit B - “Incompatible With Secondary Licenses” Notice
350
351 This Source Code Form is “Incompatible
352 With Secondary Licenses”, as defined by
353 the Mozilla Public License, v. 2.0.
diff --git a/vendor/github.com/hashicorp/go-multierror/README.md b/vendor/github.com/hashicorp/go-multierror/README.md
new file mode 100644
index 0000000..e81be50
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-multierror/README.md
@@ -0,0 +1,91 @@
1# go-multierror
2
3`go-multierror` is a package for Go that provides a mechanism for
4representing a list of `error` values as a single `error`.
5
6This allows a function in Go to return an `error` that might actually
7be a list of errors. If the caller knows this, they can unwrap the
8list and access the errors. If the caller doesn't know, the error
9formats to a nice human-readable string.
10
11`go-multierror` implements the
12[errwrap](https://github.com/hashicorp/errwrap) interface so that it can
13be used with that library, as well.
14
15## Installation and Docs
16
17Install using `go get github.com/hashicorp/go-multierror`.
18
19Full documentation is available at
20http://godoc.org/github.com/hashicorp/go-multierror
21
22## Usage
23
24go-multierror is easy to use and purposely built to be unobtrusive in
25existing Go applications/libraries that may not be aware of it.
26
27**Building a list of errors**
28
29The `Append` function is used to create a list of errors. This function
30behaves a lot like the Go built-in `append` function: it doesn't matter
31if the first argument is nil, a `multierror.Error`, or any other `error`,
32the function behaves as you would expect.
33
34```go
35var result error
36
37if err := step1(); err != nil {
38 result = multierror.Append(result, err)
39}
40if err := step2(); err != nil {
41 result = multierror.Append(result, err)
42}
43
44return result
45```
46
47**Customizing the formatting of the errors**
48
49By specifying a custom `ErrorFormat`, you can customize the format
50of the `Error() string` function:
51
52```go
53var result *multierror.Error
54
55// ... accumulate errors here, maybe using Append
56
57if result != nil {
58 result.ErrorFormat = func([]error) string {
59 return "errors!"
60 }
61}
62```
63
64**Accessing the list of errors**
65
66`multierror.Error` implements `error` so if the caller doesn't know about
67multierror, it will work just fine. But if you're aware a multierror might
68be returned, you can use type switches to access the list of errors:
69
70```go
71if err := something(); err != nil {
72 if merr, ok := err.(*multierror.Error); ok {
73 // Use merr.Errors
74 }
75}
76```
77
78**Returning a multierror only if there are errors**
79
80If you build a `multierror.Error`, you can use the `ErrorOrNil` function
81to return an `error` implementation only if there are errors to return:
82
83```go
84var result *multierror.Error
85
86// ... accumulate errors here
87
88// Return the `error` only if errors were added to the multierror, otherwise
89// return nil since there are no errors.
90return result.ErrorOrNil()
91```
diff --git a/vendor/github.com/hashicorp/go-multierror/append.go b/vendor/github.com/hashicorp/go-multierror/append.go
new file mode 100644
index 0000000..00afa9b
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-multierror/append.go
@@ -0,0 +1,37 @@
1package multierror
2
3// Append is a helper function that will append more errors
4// onto an Error in order to create a larger multi-error.
5//
6// If err is not a multierror.Error, then it will be turned into
7// one. If any of the errs are multierror.Error, they will be flattened
8// one level into err.
9func Append(err error, errs ...error) *Error {
10 switch err := err.(type) {
11 case *Error:
12 // Typed nils can reach here, so initialize if we are nil
13 if err == nil {
14 err = new(Error)
15 }
16
17 // Go through each error and flatten
18 for _, e := range errs {
19 switch e := e.(type) {
20 case *Error:
21 err.Errors = append(err.Errors, e.Errors...)
22 default:
23 err.Errors = append(err.Errors, e)
24 }
25 }
26
27 return err
28 default:
29 newErrs := make([]error, 0, len(errs)+1)
30 if err != nil {
31 newErrs = append(newErrs, err)
32 }
33 newErrs = append(newErrs, errs...)
34
35 return Append(&Error{}, newErrs...)
36 }
37}
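
A short sketch of the flattening behavior described above: a `*multierror.Error` passed to `Append` contributes its wrapped errors individually rather than nesting (the error messages are illustrative):

```go
package main

import (
	"errors"
	"fmt"

	multierror "github.com/hashicorp/go-multierror"
)

func main() {
	var result error // starts nil, as in typical accumulation code

	result = multierror.Append(result, errors.New("step1 failed"))

	// A *multierror.Error argument is flattened one level, so its
	// wrapped errors are appended individually.
	inner := multierror.Append(nil, errors.New("a"), errors.New("b"))
	result = multierror.Append(result, inner)

	merr := result.(*multierror.Error)
	fmt.Println(len(merr.Errors)) // 3, not 2
}
```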
diff --git a/vendor/github.com/hashicorp/go-multierror/flatten.go b/vendor/github.com/hashicorp/go-multierror/flatten.go
new file mode 100644
index 0000000..aab8e9a
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-multierror/flatten.go
@@ -0,0 +1,26 @@
1package multierror
2
3// Flatten flattens the given error, merging any *Errors together into
4// a single *Error.
5func Flatten(err error) error {
6 // If it isn't an *Error, just return the error as-is
7 if _, ok := err.(*Error); !ok {
8 return err
9 }
10
11 // Otherwise, make the result and flatten away!
12 flatErr := new(Error)
13 flatten(err, flatErr)
14 return flatErr
15}
16
17func flatten(err error, flatErr *Error) {
18 switch err := err.(type) {
19 case *Error:
20 for _, e := range err.Errors {
21 flatten(e, flatErr)
22 }
23 default:
24 flatErr.Errors = append(flatErr.Errors, err)
25 }
26}
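
`Append` only flattens one level, so hand-built error trees can still nest; `Flatten` walks the whole tree. A minimal sketch with illustrative errors:

```go
package main

import (
	"errors"
	"fmt"

	multierror "github.com/hashicorp/go-multierror"
)

func main() {
	// Build a nested tree by hand; Append would have flattened this,
	// but errors stored directly in the Errors slice can nest.
	nested := &multierror.Error{
		Errors: []error{
			errors.New("top"),
			&multierror.Error{Errors: []error{
				errors.New("inner1"),
				errors.New("inner2"),
			}},
		},
	}

	flat := multierror.Flatten(nested).(*multierror.Error)
	fmt.Println(len(flat.Errors)) // 3
}
```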
diff --git a/vendor/github.com/hashicorp/go-multierror/format.go b/vendor/github.com/hashicorp/go-multierror/format.go
new file mode 100644
index 0000000..bb65a12
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-multierror/format.go
@@ -0,0 +1,23 @@
1package multierror
2
3import (
4 "fmt"
5 "strings"
6)
7
8// ErrorFormatFunc is a function callback that is called by Error to
9// turn the list of errors into a string.
10type ErrorFormatFunc func([]error) string
11
12// ListFormatFunc is a basic formatter that outputs the number of errors
13// that occurred along with a bullet point list of the errors.
14func ListFormatFunc(es []error) string {
15 points := make([]string, len(es))
16 for i, err := range es {
17 points[i] = fmt.Sprintf("* %s", err)
18 }
19
20 return fmt.Sprintf(
21 "%d error(s) occurred:\n\n%s",
22 len(es), strings.Join(points, "\n"))
23}
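
A sketch showing the default `ListFormatFunc` output next to a custom `ErrorFormat` (the error messages are illustrative):

```go
package main

import (
	"errors"
	"fmt"

	multierror "github.com/hashicorp/go-multierror"
)

func main() {
	err := multierror.Append(nil,
		errors.New("connection refused"),
		errors.New("timeout"),
	)

	// Default formatting via ListFormatFunc:
	fmt.Println(err)
	// 2 error(s) occurred:
	//
	// * connection refused
	// * timeout

	// A custom one-line ErrorFormat:
	err.ErrorFormat = func(es []error) string {
		return fmt.Sprintf("%d errors", len(es))
	}
	fmt.Println(err) // 2 errors
}
```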
diff --git a/vendor/github.com/hashicorp/go-multierror/multierror.go b/vendor/github.com/hashicorp/go-multierror/multierror.go
new file mode 100644
index 0000000..2ea0827
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-multierror/multierror.go
@@ -0,0 +1,51 @@
1package multierror
2
3import (
4 "fmt"
5)
6
7// Error is an error type to track multiple errors. This is used to
8// accumulate multiple errors and return them as a single "error".
9type Error struct {
10 Errors []error
11 ErrorFormat ErrorFormatFunc
12}
13
14func (e *Error) Error() string {
15 fn := e.ErrorFormat
16 if fn == nil {
17 fn = ListFormatFunc
18 }
19
20 return fn(e.Errors)
21}
22
23// ErrorOrNil returns an error interface if this Error represents
24// a list of errors, or returns nil if the list of errors is empty. This
25// function is useful at the end of accumulation to make sure that the value
26// returned represents the existence of errors.
27func (e *Error) ErrorOrNil() error {
28 if e == nil {
29 return nil
30 }
31 if len(e.Errors) == 0 {
32 return nil
33 }
34
35 return e
36}
37
38func (e *Error) GoString() string {
39 return fmt.Sprintf("*%#v", *e)
40}
41
42// WrappedErrors returns the list of errors that this Error is wrapping.
43// It is an implementation of the errwrap.Wrapper interface so that
44// multierror.Error can be used with that library.
45//
46// This method is not safe to be called concurrently and is no different
47// than accessing the Errors field directly. It is implemented only to
48// satisfy the errwrap.Wrapper interface.
49func (e *Error) WrappedErrors() []error {
50 return e.Errors
51}
diff --git a/vendor/github.com/hashicorp/go-multierror/prefix.go b/vendor/github.com/hashicorp/go-multierror/prefix.go
new file mode 100644
index 0000000..5c477ab
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-multierror/prefix.go
@@ -0,0 +1,37 @@
1package multierror
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/errwrap"
7)
8
9// Prefix is a helper function that will prefix some text
10// to the given error. If the error is a multierror.Error, then
11// it will be prefixed to each wrapped error.
12//
13// This is useful to use when appending multiple multierrors
14// together in order to give better scoping.
15func Prefix(err error, prefix string) error {
16 if err == nil {
17 return nil
18 }
19
20 format := fmt.Sprintf("%s {{err}}", prefix)
21 switch err := err.(type) {
22 case *Error:
23 // Typed nils can reach here, so initialize if we are nil
24 if err == nil {
25 err = new(Error)
26 }
27
28 // Wrap each of the errors
29 for i, e := range err.Errors {
30 err.Errors[i] = errwrap.Wrapf(format, e)
31 }
32
33 return err
34 default:
35 return errwrap.Wrapf(format, err)
36 }
37}
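
A minimal sketch of `Prefix` scoping each wrapped error (the resource name and messages are illustrative):

```go
package main

import (
	"errors"
	"fmt"

	multierror "github.com/hashicorp/go-multierror"
)

func main() {
	err := multierror.Append(nil,
		errors.New("missing name"),
		errors.New("missing id"),
	)

	// Each wrapped error gets the prefix, which gives useful scope
	// when several multierrors are merged later.
	scoped := multierror.Prefix(err, "resource aws_instance.web:")
	fmt.Println(scoped)
	// 2 error(s) occurred:
	//
	// * resource aws_instance.web: missing name
	// * resource aws_instance.web: missing id
}
```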
diff --git a/vendor/github.com/hashicorp/go-plugin/LICENSE b/vendor/github.com/hashicorp/go-plugin/LICENSE
new file mode 100644
index 0000000..82b4de9
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/LICENSE
@@ -0,0 +1,353 @@
1Mozilla Public License, version 2.0
2
31. Definitions
4
51.1. “Contributor”
6
7 means each individual or legal entity that creates, contributes to the
8 creation of, or owns Covered Software.
9
101.2. “Contributor Version”
11
12 means the combination of the Contributions of others (if any) used by a
13 Contributor and that particular Contributor’s Contribution.
14
151.3. “Contribution”
16
17 means Covered Software of a particular Contributor.
18
191.4. “Covered Software”
20
21 means Source Code Form to which the initial Contributor has attached the
22 notice in Exhibit A, the Executable Form of such Source Code Form, and
23 Modifications of such Source Code Form, in each case including portions
24 thereof.
25
261.5. “Incompatible With Secondary Licenses”
27 means
28
29 a. that the initial Contributor has attached the notice described in
30 Exhibit B to the Covered Software; or
31
32 b. that the Covered Software was made available under the terms of version
33 1.1 or earlier of the License, but not also under the terms of a
34 Secondary License.
35
361.6. “Executable Form”
37
38 means any form of the work other than Source Code Form.
39
401.7. “Larger Work”
41
42 means a work that combines Covered Software with other material, in a separate
43 file or files, that is not Covered Software.
44
451.8. “License”
46
47 means this document.
48
491.9. “Licensable”
50
51 means having the right to grant, to the maximum extent possible, whether at the
52 time of the initial grant or subsequently, any and all of the rights conveyed by
53 this License.
54
551.10. “Modifications”
56
57 means any of the following:
58
59 a. any file in Source Code Form that results from an addition to, deletion
60 from, or modification of the contents of Covered Software; or
61
62 b. any new file in Source Code Form that contains any Covered Software.
63
641.11. “Patent Claims” of a Contributor
65
66 means any patent claim(s), including without limitation, method, process,
67 and apparatus claims, in any patent Licensable by such Contributor that
68 would be infringed, but for the grant of the License, by the making,
69 using, selling, offering for sale, having made, import, or transfer of
70 either its Contributions or its Contributor Version.
71
721.12. “Secondary License”
73
74 means either the GNU General Public License, Version 2.0, the GNU Lesser
75 General Public License, Version 2.1, the GNU Affero General Public
76 License, Version 3.0, or any later versions of those licenses.
77
781.13. “Source Code Form”
79
80 means the form of the work preferred for making modifications.
81
821.14. “You” (or “Your”)
83
84 means an individual or a legal entity exercising rights under this
85 License. For legal entities, “You” includes any entity that controls, is
86 controlled by, or is under common control with You. For purposes of this
87 definition, “control” means (a) the power, direct or indirect, to cause
88 the direction or management of such entity, whether by contract or
89 otherwise, or (b) ownership of more than fifty percent (50%) of the
90 outstanding shares or beneficial ownership of such entity.
91
92
932. License Grants and Conditions
94
952.1. Grants
96
97 Each Contributor hereby grants You a world-wide, royalty-free,
98 non-exclusive license:
99
100 a. under intellectual property rights (other than patent or trademark)
101 Licensable by such Contributor to use, reproduce, make available,
102 modify, display, perform, distribute, and otherwise exploit its
103 Contributions, either on an unmodified basis, with Modifications, or as
104 part of a Larger Work; and
105
106 b. under Patent Claims of such Contributor to make, use, sell, offer for
107 sale, have made, import, and otherwise transfer either its Contributions
108 or its Contributor Version.
109
1102.2. Effective Date
111
112 The licenses granted in Section 2.1 with respect to any Contribution become
113 effective for each Contribution on the date the Contributor first distributes
114 such Contribution.
115
1162.3. Limitations on Grant Scope
117
118 The licenses granted in this Section 2 are the only rights granted under this
119 License. No additional rights or licenses will be implied from the distribution
120 or licensing of Covered Software under this License. Notwithstanding Section
121 2.1(b) above, no patent license is granted by a Contributor:
122
123 a. for any code that a Contributor has removed from Covered Software; or
124
125 b. for infringements caused by: (i) Your and any other third party’s
126 modifications of Covered Software, or (ii) the combination of its
127 Contributions with other software (except as part of its Contributor
128 Version); or
129
130 c. under Patent Claims infringed by Covered Software in the absence of its
131 Contributions.
132
133 This License does not grant any rights in the trademarks, service marks, or
134 logos of any Contributor (except as may be necessary to comply with the
135 notice requirements in Section 3.4).
136
1372.4. Subsequent Licenses
138
139 No Contributor makes additional grants as a result of Your choice to
140 distribute the Covered Software under a subsequent version of this License
141 (see Section 10.2) or under the terms of a Secondary License (if permitted
142 under the terms of Section 3.3).
143
1442.5. Representation
145
146 Each Contributor represents that the Contributor believes its Contributions
147 are its original creation(s) or it has sufficient rights to grant the
148 rights to its Contributions conveyed by this License.
149
1502.6. Fair Use
151
152 This License is not intended to limit any rights You have under applicable
153 copyright doctrines of fair use, fair dealing, or other equivalents.
154
1552.7. Conditions
156
157 Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
158 Section 2.1.
159
160
1613. Responsibilities
162
1633.1. Distribution of Source Form
164
165 All distribution of Covered Software in Source Code Form, including any
166 Modifications that You create or to which You contribute, must be under the
167 terms of this License. You must inform recipients that the Source Code Form
168 of the Covered Software is governed by the terms of this License, and how
169 they can obtain a copy of this License. You may not attempt to alter or
170 restrict the recipients’ rights in the Source Code Form.
171
1723.2. Distribution of Executable Form
173
174 If You distribute Covered Software in Executable Form then:
175
176 a. such Covered Software must also be made available in Source Code Form,
177 as described in Section 3.1, and You must inform recipients of the
178 Executable Form how they can obtain a copy of such Source Code Form by
179 reasonable means in a timely manner, at a charge no more than the cost
180 of distribution to the recipient; and
181
182 b. You may distribute such Executable Form under the terms of this License,
183 or sublicense it under different terms, provided that the license for
184 the Executable Form does not attempt to limit or alter the recipients’
185 rights in the Source Code Form under this License.
186
1873.3. Distribution of a Larger Work
188
189 You may create and distribute a Larger Work under terms of Your choice,
190 provided that You also comply with the requirements of this License for the
191 Covered Software. If the Larger Work is a combination of Covered Software
192 with a work governed by one or more Secondary Licenses, and the Covered
193 Software is not Incompatible With Secondary Licenses, this License permits
194 You to additionally distribute such Covered Software under the terms of
195 such Secondary License(s), so that the recipient of the Larger Work may, at
196 their option, further distribute the Covered Software under the terms of
197 either this License or such Secondary License(s).
198
1993.4. Notices
200
201 You may not remove or alter the substance of any license notices (including
202 copyright notices, patent notices, disclaimers of warranty, or limitations
203 of liability) contained within the Source Code Form of the Covered
204 Software, except that You may alter any license notices to the extent
205 required to remedy known factual inaccuracies.
206
2073.5. Application of Additional Terms
208
209 You may choose to offer, and to charge a fee for, warranty, support,
210 indemnity or liability obligations to one or more recipients of Covered
211 Software. However, You may do so only on Your own behalf, and not on behalf
212 of any Contributor. You must make it absolutely clear that any such
213 warranty, support, indemnity, or liability obligation is offered by You
214 alone, and You hereby agree to indemnify every Contributor for any
215 liability incurred by such Contributor as a result of warranty, support,
216 indemnity or liability terms You offer. You may include additional
217 disclaimers of warranty and limitations of liability specific to any
218 jurisdiction.
219
2204. Inability to Comply Due to Statute or Regulation
221
222 If it is impossible for You to comply with any of the terms of this License
223 with respect to some or all of the Covered Software due to statute, judicial
224 order, or regulation then You must: (a) comply with the terms of this License
225 to the maximum extent possible; and (b) describe the limitations and the code
226 they affect. Such description must be placed in a text file included with all
227 distributions of the Covered Software under this License. Except to the
228 extent prohibited by statute or regulation, such description must be
229 sufficiently detailed for a recipient of ordinary skill to be able to
230 understand it.
231
2325. Termination
233
2345.1. The rights granted under this License will terminate automatically if You
235 fail to comply with any of its terms. However, if You become compliant,
236 then the rights granted under this License from a particular Contributor
237 are reinstated (a) provisionally, unless and until such Contributor
238 explicitly and finally terminates Your grants, and (b) on an ongoing basis,
239 if such Contributor fails to notify You of the non-compliance by some
240 reasonable means prior to 60 days after You have come back into compliance.
241 Moreover, Your grants from a particular Contributor are reinstated on an
242 ongoing basis if such Contributor notifies You of the non-compliance by
243 some reasonable means, this is the first time You have received notice of
244 non-compliance with this License from such Contributor, and You become
245 compliant prior to 30 days after Your receipt of the notice.
246
2475.2. If You initiate litigation against any entity by asserting a patent
248 infringement claim (excluding declaratory judgment actions, counter-claims,
249 and cross-claims) alleging that a Contributor Version directly or
250 indirectly infringes any patent, then the rights granted to You by any and
251 all Contributors for the Covered Software under Section 2.1 of this License
252 shall terminate.
253
2545.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
255 license agreements (excluding distributors and resellers) which have been
256 validly granted by You or Your distributors under this License prior to
257 termination shall survive termination.
258
2596. Disclaimer of Warranty
260
261 Covered Software is provided under this License on an “as is” basis, without
262 warranty of any kind, either expressed, implied, or statutory, including,
263 without limitation, warranties that the Covered Software is free of defects,
264 merchantable, fit for a particular purpose or non-infringing. The entire
265 risk as to the quality and performance of the Covered Software is with You.
266 Should any Covered Software prove defective in any respect, You (not any
267 Contributor) assume the cost of any necessary servicing, repair, or
268 correction. This disclaimer of warranty constitutes an essential part of this
269 License. No use of any Covered Software is authorized under this License
270 except under this disclaimer.
271
2727. Limitation of Liability
273
274 Under no circumstances and under no legal theory, whether tort (including
275 negligence), contract, or otherwise, shall any Contributor, or anyone who
276 distributes Covered Software as permitted above, be liable to You for any
277 direct, indirect, special, incidental, or consequential damages of any
278 character including, without limitation, damages for lost profits, loss of
279 goodwill, work stoppage, computer failure or malfunction, or any and all
280 other commercial damages or losses, even if such party shall have been
281 informed of the possibility of such damages. This limitation of liability
282 shall not apply to liability for death or personal injury resulting from such
283 party’s negligence to the extent applicable law prohibits such limitation.
284 Some jurisdictions do not allow the exclusion or limitation of incidental or
285 consequential damages, so this exclusion and limitation may not apply to You.
286
2878. Litigation
288
289 Any litigation relating to this License may be brought only in the courts of
290 a jurisdiction where the defendant maintains its principal place of business
291 and such litigation shall be governed by laws of that jurisdiction, without
292 reference to its conflict-of-law provisions. Nothing in this Section shall
293 prevent a party’s ability to bring cross-claims or counter-claims.
294
2959. Miscellaneous
296
297 This License represents the complete agreement concerning the subject matter
298 hereof. If any provision of this License is held to be unenforceable, such
299 provision shall be reformed only to the extent necessary to make it
300 enforceable. Any law or regulation which provides that the language of a
301 contract shall be construed against the drafter shall not be used to construe
302 this License against a Contributor.
303
304
30510. Versions of the License
306
30710.1. New Versions
308
309 Mozilla Foundation is the license steward. Except as provided in Section
310 10.3, no one other than the license steward has the right to modify or
311 publish new versions of this License. Each version will be given a
312 distinguishing version number.
313
31410.2. Effect of New Versions
315
316 You may distribute the Covered Software under the terms of the version of
317 the License under which You originally received the Covered Software, or
318 under the terms of any subsequent version published by the license
319 steward.
320
32110.3. Modified Versions
322
323 If you create software not governed by this License, and you want to
324 create a new license for such software, you may create and use a modified
325 version of this License if you rename the license and remove any
326 references to the name of the license steward (except to note that such
327 modified license differs from this License).
328
32910.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
330 If You choose to distribute Source Code Form that is Incompatible With
331 Secondary Licenses under the terms of this version of the License, the
332 notice described in Exhibit B of this License must be attached.
333
334Exhibit A - Source Code Form License Notice
335
336 This Source Code Form is subject to the
337 terms of the Mozilla Public License, v.
338 2.0. If a copy of the MPL was not
339 distributed with this file, You can
340 obtain one at
341 http://mozilla.org/MPL/2.0/.
342
343If it is not possible or desirable to put the notice in a particular file, then
344You may include the notice in a location (such as a LICENSE file in a relevant
345directory) where a recipient would be likely to look for such a notice.
346
347You may add additional accurate notices of copyright ownership.
348
349Exhibit B - “Incompatible With Secondary Licenses” Notice
350
351 This Source Code Form is “Incompatible
352 With Secondary Licenses”, as defined by
353 the Mozilla Public License, v. 2.0.
diff --git a/vendor/github.com/hashicorp/go-plugin/README.md b/vendor/github.com/hashicorp/go-plugin/README.md
new file mode 100644
index 0000000..2058cfb
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/README.md
@@ -0,0 +1,161 @@
1# Go Plugin System over RPC
2
3`go-plugin` is a Go (golang) plugin system over RPC. It is the plugin system
4that has been in use by HashiCorp tooling for over 3 years. While initially
5created for [Packer](https://www.packer.io), it has since been used by
6[Terraform](https://www.terraform.io) and [Otto](https://www.ottoproject.io),
7with plans to also use it for [Nomad](https://www.nomadproject.io) and
8[Vault](https://www.vaultproject.io).
9
10While the plugin system is over RPC, it is currently only designed to work
11over a local [reliable] network. Plugins over a real network are not supported
12and will lead to unexpected behavior.
13
14This plugin system has been used on millions of machines across many different
15projects and has proven to be battle hardened and ready for production use.
16
17## Features
18
19The HashiCorp plugin system supports a number of features:
20
21**Plugins are Go interface implementations.** This makes writing and consuming
22plugins feel very natural. To a plugin author: you just implement an
23interface as if it were going to run in the same process. For a plugin user:
24you just use and call functions on an interface as if it were in the same
25process. This plugin system handles the communication in between.
26
27**Complex arguments and return values are supported.** This library
28provides APIs for handling complex arguments and return values such
29as interfaces, `io.Reader/Writer`, etc. We do this by giving you a library
30(`MuxBroker`) for creating new connections between the client/server to
31serve additional interfaces or transfer raw data.
32
33**Bidirectional communication.** Because the plugin system supports
34complex arguments, the host process can send it interface implementations
35and the plugin can call back into the host process.
36
37**Built-in Logging.** Any plugins that use the `log` standard library
38will have log data automatically sent to the host process. The host
39process will mirror this output prefixed with the path to the plugin
40binary. This makes debugging with plugins simple.
41
42**Protocol Versioning.** A very basic "protocol version" is supported that
43can be incremented to invalidate any previous plugins. This is useful when
44interface signatures are changing, protocol level changes are necessary,
45etc. When a protocol version is incompatible, a human friendly error
46message is shown to the end user.
47
48**Stdout/Stderr Syncing.** While plugins are subprocesses, they can continue
49to use stdout/stderr as usual and the output will get mirrored back to
50the host process. The host process can control which `io.Writer` these
51streams go to, preventing this mirroring if desired.
52
53**TTY Preservation.** Plugin subprocesses are connected to the identical
54stdin file descriptor as the host process, allowing software that requires
55a TTY to work. For example, a plugin can execute `ssh` and even though there
56are multiple subprocesses and RPC happening, it will look and act perfectly
57to the end user.
58
59**Host upgrade while a plugin is running.** Plugins can be "reattached"
60so that the host process can be upgraded while the plugin is still running.
61This requires the host/plugin to know this is possible and daemonize
62properly. `NewClient` takes a `ReattachConfig` to determine if and how to
63reattach.
64
65## Architecture
66
67The HashiCorp plugin system works by launching subprocesses and communicating
68over RPC (using standard `net/rpc`). A single connection is made between
69any plugin and the host process, and we use a
70[connection multiplexing](https://github.com/hashicorp/yamux)
71library to multiplex any other connections on top.
72
73This architecture has a number of benefits:
74
75 * Plugins can't crash your host process: A panic in a plugin doesn't
76 panic the plugin user.
77
78 * Plugins are very easy to write: just write a Go application and `go build`.
79 Theoretically you could also use another language as long as it can
80 speak the Go `net/rpc` protocol, but this hasn't yet been tried.
81
82 * Plugins are very easy to install: just put the binary in a location where
83 the host will find it (depends on the host but this library also provides
84 helpers), and the plugin host handles the rest.
85
86 * Plugins can be relatively secure: The plugin only has access to the
87 interfaces and args given to it, not to the entire memory space of the
88 process. More security features are planned (see the coming soon section
89 below).
90
91## Usage
92
93To use the plugin system, you must take the following steps. These are
94high-level steps that must be done. Examples are available in the
95`examples/` directory.
96
97 1. Choose the interface(s) you want to expose for plugins.
98
99 2. For each interface, implement an implementation of that interface
100 that communicates over an `*rpc.Client` (from the standard `net/rpc`
101 package) for every function call. Likewise, implement the RPC server
102 struct this communicates to which is then communicating to a real,
103 concrete implementation.
104
105 3. Create a `Plugin` implementation that knows how to create the RPC
106 client/server for a given plugin type.
107
108 4. Plugin authors call `plugin.Serve` to serve a plugin from the
109 `main` function.
110
111 5. Plugin users use `plugin.Client` to launch a subprocess and request
112 an interface implementation over RPC.
113
114That's it! In practice, step 2 is the most tedious and time consuming step.
115Even so, it isn't very difficult and you can see examples in the `examples/`
116directory as well as throughout our various open source projects.
117
118For complete API documentation, see [GoDoc](https://godoc.org/github.com/hashicorp/go-plugin).
119
120## Roadmap
121
122Our plugin system is constantly evolving. As we use the plugin system for
123new projects or for new features in existing projects, we constantly find
124improvements we can make.
125
126At this point in time, the roadmap for the plugin system is:
127
128**Cryptographically Secure Plugins.** We'll implement signing plugins
129and loading signed plugins in order to allow Vault to make use of multi-process
130in a secure way.
131
132**Semantic Versioning.** Plugins will be able to implement a semantic version.
133This plugin system will give host processes a system for constraining
134versions. This is in addition to the protocol versioning already present
135which is more for larger underlying changes.
136
137**Plugin fetching.** We will integrate with [go-getter](https://github.com/hashicorp/go-getter)
138to support automatic download + install of plugins. Paired with cryptographically
139secure plugins (above), we can make this a safe operation for an amazing
140user experience.
141
142## What About Shared Libraries?
143
144When we started using plugins (late 2012, early 2013), plugins over RPC
145were the only option since Go didn't support dynamic library loading. Today,
146Go still doesn't support dynamic library loading, but support is planned.
147Since 2012, our plugin system has stabilized through use by millions of
148users, and has many benefits we've come to value greatly.
149
150For example, we intend to use this plugin system in
151[Vault](https://www.vaultproject.io), and dynamic library loading will
152simply never be acceptable in Vault for security reasons. That is an extreme
153example, but we believe our library system has more upsides than downsides
154over dynamic library loading and since we've had it built and tested for years,
155we'll likely continue to use it.
156
157Shared libraries have one major advantage over our system: much
158higher performance. In real-world scenarios across our various tools,
159we've never required any more performance out of our plugin system and it
160has seen very high throughput, so this isn't a concern for us at the moment.
161
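
And a matching sketch of step 5, the host side, using the `Client` defined in `client.go` below. It reuses the hypothetical `Greeter`/`GreeterPlugin` types from the previous sketch; the handshake must match the one the plugin serves, and the binary path and plugin name are illustrative:

```go
package main

import (
	"fmt"
	"os/exec"

	plugin "github.com/hashicorp/go-plugin"
)

func main() {
	// Launch the plugin binary as a managed subprocess.
	client := plugin.NewClient(&plugin.ClientConfig{
		HandshakeConfig: plugin.HandshakeConfig{
			ProtocolVersion:  1,
			MagicCookieKey:   "BASIC_PLUGIN",
			MagicCookieValue: "hello",
		},
		Plugins: map[string]plugin.Plugin{
			"greeter": &GreeterPlugin{},
		},
		Cmd: exec.Command("./plugin-binary"),
	})
	defer client.Kill()

	// Connect over RPC, then dispense an interface implementation.
	rpcClient, err := client.Client()
	if err != nil {
		panic(err)
	}

	raw, err := rpcClient.Dispense("greeter")
	if err != nil {
		panic(err)
	}

	greeter := raw.(Greeter)
	fmt.Println(greeter.Greet())
}
```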
diff --git a/vendor/github.com/hashicorp/go-plugin/client.go b/vendor/github.com/hashicorp/go-plugin/client.go
new file mode 100644
index 0000000..9f8a0f2
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/client.go
@@ -0,0 +1,581 @@
1package plugin
2
3import (
4 "bufio"
5 "errors"
6 "fmt"
7 "io"
8 "io/ioutil"
9 "log"
10 "net"
11 "os"
12 "os/exec"
13 "path/filepath"
14 "strconv"
15 "strings"
16 "sync"
17 "sync/atomic"
18 "time"
19 "unicode"
20)
21
22// If this is 1, then we've called CleanupClients. This can be used
23// by plugin RPC implementations to change error behavior since you
24// can expect network connection errors at this point. This should be
25// read by using sync/atomic.
26var Killed uint32 = 0
27
28// This is a slice of the "managed" clients which are cleaned up when
29// calling Cleanup
30var managedClients = make([]*Client, 0, 5)
31var managedClientsLock sync.Mutex
32
33// Error types
34var (
35 // ErrProcessNotFound is returned when a client is instantiated to
36 // reattach to an existing process and it isn't found.
37 ErrProcessNotFound = errors.New("Reattachment process not found")
38)
39
40// Client handles the lifecycle of a plugin application. It launches
41// plugins, connects to them, dispenses interface implementations, and handles
42// killing the process.
43//
44// Plugin hosts should use one Client for each plugin executable. To
45// dispense a plugin type, use the `Client.Client` function, and then
46// call `Dispense`. This awkward API is mostly historical but is used to split
47// the client that deals with subprocess management and the client that
48// does RPC management.
49//
50// See NewClient and ClientConfig for using a Client.
51type Client struct {
52 config *ClientConfig
53 exited bool
54 doneLogging chan struct{}
55 l sync.Mutex
56 address net.Addr
57 process *os.Process
58 client *RPCClient
59}
60
61// ClientConfig is the configuration used to initialize a new
62// plugin client. After being used to initialize a plugin client,
63// that configuration must not be modified again.
64type ClientConfig struct {
65 // HandshakeConfig is the configuration that must match servers.
66 HandshakeConfig
67
68 // Plugins are the plugins that can be consumed.
69 Plugins map[string]Plugin
70
71 // One of the following must be set, but not both.
72 //
73 // Cmd is the unstarted subprocess for starting the plugin. If this is
74 // set, then the Client starts the plugin process on its own and connects
75 // to it.
76 //
77 // Reattach is configuration for reattaching to an existing plugin process
78 // that is already running. This isn't common.
79 Cmd *exec.Cmd
80 Reattach *ReattachConfig
81
82 // Managed represents if the client should be managed by the
83 // plugin package or not. If true, then by calling CleanupClients,
84 // it will automatically be cleaned up. Otherwise, the client
85 // user is fully responsible for making sure to Kill all plugin
86 // clients. By default the client is _not_ managed.
87 Managed bool
88
89 // The minimum and maximum port to use for communicating with
90 // the subprocess. If not set, this defaults to 10,000 and 25,000
91 // respectively.
92 MinPort, MaxPort uint
93
94 // StartTimeout is the timeout to wait for the plugin to say it
95 // has started successfully.
96 StartTimeout time.Duration
97
98 // If non-nil, then the stderr of the client will be written to here
99 // (as well as the log). This is the original os.Stderr of the subprocess.
100 // This isn't the output of synced stderr.
101 Stderr io.Writer
102
103 // SyncStdout, SyncStderr can be set to override the
104 // respective os.Std* values in the plugin. Care should be taken to
105 // avoid races here. If these are nil, the corresponding output from
106 // the plugin is discarded rather than synced.
107 //
108 // If the default values (nil) are used, then this package will not
109 // sync any of these streams.
110 SyncStdout io.Writer
111 SyncStderr io.Writer
112}
113
114// ReattachConfig is used to configure a client to reattach to an
115// already-running plugin process. You can retrieve this information by
116// calling ReattachConfig on Client.
117type ReattachConfig struct {
118 Addr net.Addr
119 Pid int
120}
121
122// CleanupClients makes sure all the managed subprocesses are killed and
123// properly logged. This should be called before the parent process running
124// the plugins exits.
125//
126// This must only be called _once_.
127func CleanupClients() {
128 // Set the killed to true so that we don't get unexpected panics
129 atomic.StoreUint32(&Killed, 1)
130
131 // Kill all the managed clients in parallel and use a WaitGroup
132 // to wait for them all to finish up.
133 var wg sync.WaitGroup
134 managedClientsLock.Lock()
135 for _, client := range managedClients {
136 wg.Add(1)
137
138 go func(client *Client) {
139 client.Kill()
140 wg.Done()
141 }(client)
142 }
143 managedClientsLock.Unlock()
144
145 log.Println("[DEBUG] plugin: waiting for all plugin processes to complete...")
146 wg.Wait()
147}
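
A minimal sketch of the intended call site, assuming all clients are created with Managed: true:

package main

import plugin "github.com/hashicorp/go-plugin"

func main() {
	// Kill every managed client (those created with Managed: true) before
	// the host exits. CleanupClients must only be called once.
	defer plugin.CleanupClients()

	// ... create managed clients with plugin.NewClient and use them ...
}
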
148
149// NewClient creates a new plugin client that manages the lifecycle of an
150// external plugin and gets the address for the RPC connection.
151//
152// The client must be cleaned up at some point by calling Kill(). If
153// the client is a managed client (created with NewManagedClient) you
154// can just call CleanupClients at the end of your program and they will
155// be properly cleaned.
156func NewClient(config *ClientConfig) (c *Client) {
157 if config.MinPort == 0 && config.MaxPort == 0 {
158 config.MinPort = 10000
159 config.MaxPort = 25000
160 }
161
162 if config.StartTimeout == 0 {
163 config.StartTimeout = 1 * time.Minute
164 }
165
166 if config.Stderr == nil {
167 config.Stderr = ioutil.Discard
168 }
169
170 if config.SyncStdout == nil {
171 config.SyncStdout = ioutil.Discard
172 }
173 if config.SyncStderr == nil {
174 config.SyncStderr = ioutil.Discard
175 }
176
177 c = &Client{config: config}
178 if config.Managed {
179 managedClientsLock.Lock()
180 managedClients = append(managedClients, c)
181 managedClientsLock.Unlock()
182 }
183
184 return
185}
186
187// Client returns an RPC client for the plugin.
188//
189// Subsequent calls to this will return the same RPC client.
190func (c *Client) Client() (*RPCClient, error) {
191 addr, err := c.Start()
192 if err != nil {
193 return nil, err
194 }
195
196 c.l.Lock()
197 defer c.l.Unlock()
198
199 if c.client != nil {
200 return c.client, nil
201 }
202
203 // Connect to the client
204 conn, err := net.Dial(addr.Network(), addr.String())
205 if err != nil {
206 return nil, err
207 }
208 if tcpConn, ok := conn.(*net.TCPConn); ok {
209 // Make sure to set keep alive so that the connection doesn't die
210 tcpConn.SetKeepAlive(true)
211 }
212
213 // Create the actual RPC client
214 c.client, err = NewRPCClient(conn, c.config.Plugins)
215 if err != nil {
216 conn.Close()
217 return nil, err
218 }
219
220 // Begin the stream syncing so that stdout, stderr work properly
221 err = c.client.SyncStreams(
222 c.config.SyncStdout,
223 c.config.SyncStderr)
224 if err != nil {
225 c.client.Close()
226 c.client = nil
227 return nil, err
228 }
229
230 return c.client, nil
231}
232
233// Exited tells whether or not the underlying process has exited.
234func (c *Client) Exited() bool {
235 c.l.Lock()
236 defer c.l.Unlock()
237 return c.exited
238}
239
240// Kill ends the executing subprocess (if it is running) and performs any
241// cleanup tasks necessary, such as capturing any remaining logs.
242//
243// This method blocks until the process successfully exits.
244//
245// This method can safely be called multiple times.
246func (c *Client) Kill() {
247 // Grab a lock to read some private fields.
248 c.l.Lock()
249 process := c.process
250 addr := c.address
251 doneCh := c.doneLogging
252 c.l.Unlock()
253
254 // If there is no process, we never started anything. Nothing to kill.
255 if process == nil {
256 return
257 }
258
259 // We need to check for address here. It is possible that the plugin
260 // started (process != nil) but has no address (addr == nil) if the
261 // plugin failed at startup. If we do have an address, we need to close
262 // the plugin net connections.
263 graceful := false
264 if addr != nil {
265 // Close the client to cleanly exit the process.
266 client, err := c.Client()
267 if err == nil {
268 err = client.Close()
269
270 // If there is no error, then we attempt to wait for a graceful
271 // exit. If there was an error, we assume that graceful cleanup
272 // won't happen and just force kill.
273 graceful = err == nil
274 if err != nil {
275 // If there was an error just log it. We're going to force
276 // kill in a moment anyways.
277 log.Printf(
278 "[WARN] plugin: error closing client during Kill: %s", err)
279 }
280 }
281 }
282
283 // If we're attempting a graceful exit, then we wait for a short period
284 // of time to allow that to happen. To wait for this we just wait on the
285 // doneCh which would be closed if the process exits.
286 if graceful {
287 select {
288 case <-doneCh:
289 return
290 case <-time.After(250 * time.Millisecond):
291 }
292 }
293
294 // If graceful exiting failed, just kill it
295 process.Kill()
296
297 // Wait for the client to finish logging so we have a complete log
298 <-doneCh
299}
300
301// Start starts the underlying subprocess, communicating with it to negotiate
302// a port for RPC connections, and returns the address to connect to via RPC.
303//
304// This method is safe to call multiple times. Subsequent calls have no effect.
305// Once a client has been started once, it cannot be started again, even if
306// it was killed.
307func (c *Client) Start() (addr net.Addr, err error) {
308 c.l.Lock()
309 defer c.l.Unlock()
310
311 if c.address != nil {
312 return c.address, nil
313 }
314
315 // Exactly one of Cmd or Reattach must be set; anything else is an
316 // error. We wrap this check in a {} for scoping reasons, in the hope
317 // that escape analysis will pop the stack here.
318 {
319 cmdSet := c.config.Cmd != nil
320 attachSet := c.config.Reattach != nil
321 if cmdSet == attachSet {
322 return nil, fmt.Errorf("Exactly one of Cmd or Reattach must be set")
323 }
324 }
325
326 // Create the logging channel for when we kill
327 c.doneLogging = make(chan struct{})
328
329 if c.config.Reattach != nil {
330 // Verify the process still exists. If not, then it is an error
331 p, err := os.FindProcess(c.config.Reattach.Pid)
332 if err != nil {
333 return nil, err
334 }
335
336 // Attempt to connect to the addr since on Unix systems FindProcess
337 // doesn't actually return an error if it can't find the process.
338 conn, err := net.Dial(
339 c.config.Reattach.Addr.Network(),
340 c.config.Reattach.Addr.String())
341 if err != nil {
342 p.Kill()
343 return nil, ErrProcessNotFound
344 }
345 conn.Close()
346
347 // Goroutine to mark exit status
348 go func(pid int) {
349 // Wait for the process to die
350 pidWait(pid)
351
352 // Log so we can see it
353 log.Printf("[DEBUG] plugin: reattached plugin process exited\n")
354
355 // Mark it
356 c.l.Lock()
357 defer c.l.Unlock()
358 c.exited = true
359
360 // Close the logging channel since that doesn't work on reattach
361 close(c.doneLogging)
362 }(p.Pid)
363
364 // Set the address and process
365 c.address = c.config.Reattach.Addr
366 c.process = p
367
368 return c.address, nil
369 }
370
371 env := []string{
372 fmt.Sprintf("%s=%s", c.config.MagicCookieKey, c.config.MagicCookieValue),
373 fmt.Sprintf("PLUGIN_MIN_PORT=%d", c.config.MinPort),
374 fmt.Sprintf("PLUGIN_MAX_PORT=%d", c.config.MaxPort),
375 }
376
377 stdout_r, stdout_w := io.Pipe()
378 stderr_r, stderr_w := io.Pipe()
379
380 cmd := c.config.Cmd
381 cmd.Env = append(cmd.Env, os.Environ()...)
382 cmd.Env = append(cmd.Env, env...)
383 cmd.Stdin = os.Stdin
384 cmd.Stderr = stderr_w
385 cmd.Stdout = stdout_w
386
387 log.Printf("[DEBUG] plugin: starting plugin: %s %#v", cmd.Path, cmd.Args)
388 err = cmd.Start()
389 if err != nil {
390 return
391 }
392
393 // Set the process
394 c.process = cmd.Process
395
396 // Make sure the command is properly cleaned up if there is an error
397 defer func() {
398 r := recover()
399
400 if err != nil || r != nil {
401 cmd.Process.Kill()
402 }
403
404 if r != nil {
405 panic(r)
406 }
407 }()
408
409 // Start goroutine to wait for process to exit
410 exitCh := make(chan struct{})
411 go func() {
412 // Make sure we close the write end of our stderr/stdout so
413 // that the readers send EOF properly.
414 defer stderr_w.Close()
415 defer stdout_w.Close()
416
417 // Wait for the command to end.
418 cmd.Wait()
419
420 // Log and make sure to flush the logs right away
421 log.Printf("[DEBUG] plugin: %s: plugin process exited\n", cmd.Path)
422 os.Stderr.Sync()
423
424 // Mark that we exited
425 close(exitCh)
426
427 // Set that we exited, which takes a lock
428 c.l.Lock()
429 defer c.l.Unlock()
430 c.exited = true
431 }()
432
433 // Start goroutine that logs the stderr
434 go c.logStderr(stderr_r)
435
436 // Start a goroutine that reads lines out of stdout and feeds
437 // them to linesCh
438 linesCh := make(chan []byte)
439 go func() {
440 defer close(linesCh)
441
442 buf := bufio.NewReader(stdout_r)
443 for {
444 line, err := buf.ReadBytes('\n')
445 if line != nil {
446 linesCh <- line
447 }
448
449 if err == io.EOF {
450 return
451 }
452 }
453 }()
454
455 // Make sure that after we exit we keep draining the lines from
456 // stdout forever, so writers to the io.Pipe don't block
457 defer func() {
458 go func() {
459 for range linesCh {
460 }
461 }()
462 }()
463
464 // Some channels for the next step
465 timeout := time.After(c.config.StartTimeout)
466
467 // Start looking for the address
468 log.Printf("[DEBUG] plugin: waiting for RPC address for: %s", cmd.Path)
469 select {
470 case <-timeout:
471 err = errors.New("timeout while waiting for plugin to start")
472 case <-exitCh:
473 err = errors.New("plugin exited before we could connect")
474 case lineBytes := <-linesCh:
475 // Trim the line and split by "|" in order to get the parts of
476 // the output.
477 line := strings.TrimSpace(string(lineBytes))
478 parts := strings.SplitN(line, "|", 4)
479 if len(parts) < 4 {
480 err = fmt.Errorf(
481 "Unrecognized remote plugin message: %s\n\n"+
482 "This usually means that the plugin is either invalid or simply\n"+
483 "needs to be recompiled to support the latest protocol.", line)
484 return
485 }
486
487 // Check the core protocol. Wrapped in a {} for scoping.
488 {
489 var coreProtocol int64
490 coreProtocol, err = strconv.ParseInt(parts[0], 10, 0)
491 if err != nil {
492 err = fmt.Errorf("Error parsing core protocol version: %s", err)
493 return
494 }
495
496 if int(coreProtocol) != CoreProtocolVersion {
497 err = fmt.Errorf("Incompatible core API version with plugin. "+
498 "Plugin version: %s, Ours: %d\n\n"+
499 "To fix this, the plugin usually only needs to be recompiled.\n"+
500 "Please report this to the plugin author.", parts[0], CoreProtocolVersion)
501 return
502 }
503 }
504
505 // Parse the protocol version
506 var protocol int64
507 protocol, err = strconv.ParseInt(parts[1], 10, 0)
508 if err != nil {
509 err = fmt.Errorf("Error parsing protocol version: %s", err)
510 return
511 }
512
513 // Test the API version
514 if uint(protocol) != c.config.ProtocolVersion {
515 err = fmt.Errorf("Incompatible API version with plugin. "+
516 "Plugin version: %s, Ours: %d", parts[1], c.config.ProtocolVersion)
517 return
518 }
519
520 switch parts[2] {
521 case "tcp":
522 addr, err = net.ResolveTCPAddr("tcp", parts[3])
523 case "unix":
524 addr, err = net.ResolveUnixAddr("unix", parts[3])
525 default:
526 err = fmt.Errorf("Unknown address type: %s", parts[2])
527 }
528 }
529
530 c.address = addr
531 return
532}
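
For reference, the line that Start waits for above is the handshake line the plugin prints to stdout in Serve (see server.go later in this diff). Its format is CORE-PROTOCOL-VERSION|APP-PROTOCOL-VERSION|NETWORK-TYPE|ADDRESS. A plugin speaking core protocol 1 and application protocol 1 over TCP might print, for example:

    1|1|tcp|127.0.0.1:10001

The port shown is hypothetical; it is whichever port in [MinPort, MaxPort] the plugin was able to bind, and a Unix-socket plugin would print something like 1|1|unix|/tmp/plugin123 instead.
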
533
534// ReattachConfig returns the information that must be provided to NewClient
535// to reattach to the plugin process that this client started. This is
536// useful for plugins that detach from their parent process.
537//
538// If this returns nil then the process hasn't been started yet. Please
539// call Start or Client before calling this.
540func (c *Client) ReattachConfig() *ReattachConfig {
541 c.l.Lock()
542 defer c.l.Unlock()
543
544 if c.address == nil {
545 return nil
546 }
547
548 if c.config.Cmd != nil && c.config.Cmd.Process == nil {
549 return nil
550 }
551
552 // If we connected via reattach, just return the information as-is
553 if c.config.Reattach != nil {
554 return c.config.Reattach
555 }
556
557 return &ReattachConfig{
558 Addr: c.address,
559 Pid: c.config.Cmd.Process.Pid,
560 }
561}
562
563func (c *Client) logStderr(r io.Reader) {
564 bufR := bufio.NewReader(r)
565 for {
566 line, err := bufR.ReadString('\n')
567 if line != "" {
568 c.config.Stderr.Write([]byte(line))
569
570 line = strings.TrimRightFunc(line, unicode.IsSpace)
571 log.Printf("[DEBUG] plugin: %s: %s", filepath.Base(c.config.Cmd.Path), line)
572 }
573
574 if err == io.EOF {
575 break
576 }
577 }
578
579 // Flag that we've completed logging for others
580 close(c.doneLogging)
581}
diff --git a/vendor/github.com/hashicorp/go-plugin/discover.go b/vendor/github.com/hashicorp/go-plugin/discover.go
new file mode 100644
index 0000000..d22c566
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/discover.go
@@ -0,0 +1,28 @@
1package plugin
2
3import (
4 "path/filepath"
5)
6
7// Discover discovers plugins that are in a given directory.
8//
9// The directory doesn't need to be absolute. For example, "." will work fine.
10//
11// This currently assumes any file matching the glob is a plugin.
12// In the future this may be smarter about checking that a file is
13// executable and so on.
14//
15// TODO: test
16func Discover(glob, dir string) ([]string, error) {
17 var err error
18
19 // Make the directory absolute if it isn't already
20 if !filepath.IsAbs(dir) {
21 dir, err = filepath.Abs(dir)
22 if err != nil {
23 return nil, err
24 }
25 }
26
27 return filepath.Glob(filepath.Join(dir, glob))
28}
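
A hedged usage sketch; the glob pattern and directory here are hypothetical:

package main

import (
	"fmt"
	"log"

	plugin "github.com/hashicorp/go-plugin"
)

func main() {
	// Find candidate plugin binaries matching the glob in the directory.
	paths, err := plugin.Discover("terraform-provider-*", "/usr/local/lib/plugins")
	if err != nil {
		log.Fatal(err)
	}
	for _, p := range paths {
		fmt.Println(p) // each match is returned as an absolute path
	}
}
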
diff --git a/vendor/github.com/hashicorp/go-plugin/error.go b/vendor/github.com/hashicorp/go-plugin/error.go
new file mode 100644
index 0000000..22a7baa
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/error.go
@@ -0,0 +1,24 @@
1package plugin
2
3// BasicError wraps error types so that they can be messaged across RPC
4// channels. Since "error" is an interface, we can't always gob-encode the
5// underlying structure. BasicError is a valid error interface implementer
6// that we can push across the wire.
7type BasicError struct {
8 Message string
9}
10
11// NewBasicError is used to create a BasicError.
12//
13// err is allowed to be nil.
14func NewBasicError(err error) *BasicError {
15 if err == nil {
16 return nil
17 }
18
19 return &BasicError{err.Error()}
20}
21
22func (e *BasicError) Error() string {
23 return e.Message
24}
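
A small sketch of why this type exists; the error text is arbitrary:

package main

import (
	"errors"
	"fmt"

	plugin "github.com/hashicorp/go-plugin"
)

func main() {
	// gob can't reliably encode arbitrary error implementations, so wrap
	// errors in *BasicError before sending them across an RPC boundary.
	basic := plugin.NewBasicError(errors.New("disk full"))
	fmt.Println(basic.Error()) // prints: disk full

	// nil in, nil out: safe to call on a possibly-nil error.
	fmt.Println(plugin.NewBasicError(nil) == nil) // prints: true
}
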
diff --git a/vendor/github.com/hashicorp/go-plugin/mux_broker.go b/vendor/github.com/hashicorp/go-plugin/mux_broker.go
new file mode 100644
index 0000000..01c45ad
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/mux_broker.go
@@ -0,0 +1,204 @@
1package plugin
2
3import (
4 "encoding/binary"
5 "fmt"
6 "log"
7 "net"
8 "sync"
9 "sync/atomic"
10 "time"
11
12 "github.com/hashicorp/yamux"
13)
14
15// MuxBroker is responsible for brokering multiplexed connections by unique ID.
16//
17// It is used by plugins to multiplex multiple RPC connections and data
18// streams on top of a single connection between the plugin process and the
19// host process.
20//
21// This allows a plugin to request a channel with a specific ID to connect to
22// or accept a connection from, and the broker handles the details of
23// holding these channels open while they're being negotiated.
24//
25// The Plugin interface has access to these for both Server and Client.
26// The broker can be used by either (optionally) to reserve and connect to
27// new multiplexed streams. This is useful for complex args and return values,
28// or anything else you might need a data stream for.
29type MuxBroker struct {
30 nextId uint32
31 session *yamux.Session
32 streams map[uint32]*muxBrokerPending
33
34 sync.Mutex
35}
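
As a hedged sketch of what the broker enables (all names here are hypothetical): an RPC-served type reserves an ID with NextId, returns it to its peer over ordinary RPC, and serves a raw stream on that ID while the peer calls broker.Dial(id) with the same number.

package example

import plugin "github.com/hashicorp/go-plugin"

// logStreamer is a hypothetical RPC-served type that keeps the broker it
// was handed in Plugin.Server.
type logStreamer struct {
	broker *plugin.MuxBroker
}

// OpenLog reserves a stream ID, hands it back over RPC, and serves raw
// bytes on that stream once the peer dials it with broker.Dial(id).
func (s *logStreamer) OpenLog(args struct{}, id *uint32) error {
	*id = s.broker.NextId()
	go func() {
		conn, err := s.broker.Accept(*id)
		if err != nil {
			return // Accept times out after 5 seconds if the peer never dials
		}
		defer conn.Close()
		conn.Write([]byte("streamed data")) // whatever payload the stream carries
	}()
	return nil
}
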
36
37type muxBrokerPending struct {
38 ch chan net.Conn
39 doneCh chan struct{}
40}
41
42func newMuxBroker(s *yamux.Session) *MuxBroker {
43 return &MuxBroker{
44 session: s,
45 streams: make(map[uint32]*muxBrokerPending),
46 }
47}
48
49// Accept accepts a connection by ID.
50//
51// This should not be called multiple times with the same ID at one time.
52func (m *MuxBroker) Accept(id uint32) (net.Conn, error) {
53 var c net.Conn
54 p := m.getStream(id)
55 select {
56 case c = <-p.ch:
57 close(p.doneCh)
58 case <-time.After(5 * time.Second):
59 m.Lock()
60 defer m.Unlock()
61 delete(m.streams, id)
62
63 return nil, fmt.Errorf("timeout waiting for accept")
64 }
65
66 // Ack our connection
67 if err := binary.Write(c, binary.LittleEndian, id); err != nil {
68 c.Close()
69 return nil, err
70 }
71
72 return c, nil
73}
74
75// AcceptAndServe is used to accept a specific stream ID and immediately
76// serve an RPC server on that stream ID. This is used to easily serve
77// complex arguments.
78//
79// The served interface is always registered to the "Plugin" name.
80func (m *MuxBroker) AcceptAndServe(id uint32, v interface{}) {
81 conn, err := m.Accept(id)
82 if err != nil {
83 log.Printf("[ERR] plugin: plugin acceptAndServe error: %s", err)
84 return
85 }
86
87 serve(conn, "Plugin", v)
88}
89
90// Close closes the connection and all sub-connections.
91func (m *MuxBroker) Close() error {
92 return m.session.Close()
93}
94
95// Dial opens a connection by ID.
96func (m *MuxBroker) Dial(id uint32) (net.Conn, error) {
97 // Open the stream
98 stream, err := m.session.OpenStream()
99 if err != nil {
100 return nil, err
101 }
102
103 // Write the stream ID onto the wire.
104 if err := binary.Write(stream, binary.LittleEndian, id); err != nil {
105 stream.Close()
106 return nil, err
107 }
108
109 // Read the ack that we connected. Then we're off!
110 var ack uint32
111 if err := binary.Read(stream, binary.LittleEndian, &ack); err != nil {
112 stream.Close()
113 return nil, err
114 }
115 if ack != id {
116 stream.Close()
117 return nil, fmt.Errorf("bad ack: %d (expected %d)", ack, id)
118 }
119
120 return stream, nil
121}
122
123// NextId returns a unique ID to use next.
124//
125// It is possible for very long-running plugin hosts to wrap this value,
126// though it would require a very large number of RPC calls. In practice
127// we've never seen it happen.
128func (m *MuxBroker) NextId() uint32 {
129 return atomic.AddUint32(&m.nextId, 1)
130}
131
132// Run starts the brokering and should be executed in a goroutine, since it
133// blocks until the session closes.
134//
135// Uses of MuxBroker never need to call this. It is called internally by
136// the plugin host/client.
137func (m *MuxBroker) Run() {
138 for {
139 stream, err := m.session.AcceptStream()
140 if err != nil {
141 // Once we receive an error, just exit
142 break
143 }
144
145 // Read the stream ID from the stream
146 var id uint32
147 if err := binary.Read(stream, binary.LittleEndian, &id); err != nil {
148 stream.Close()
149 continue
150 }
151
152 // Initialize the waiter
153 p := m.getStream(id)
154 select {
155 case p.ch <- stream:
156 default:
157 }
158
159 // Wait for a timeout
160 go m.timeoutWait(id, p)
161 }
162}
163
164func (m *MuxBroker) getStream(id uint32) *muxBrokerPending {
165 m.Lock()
166 defer m.Unlock()
167
168 p, ok := m.streams[id]
169 if ok {
170 return p
171 }
172
173 m.streams[id] = &muxBrokerPending{
174 ch: make(chan net.Conn, 1),
175 doneCh: make(chan struct{}),
176 }
177 return m.streams[id]
178}
179
180func (m *MuxBroker) timeoutWait(id uint32, p *muxBrokerPending) {
181 // Wait for the stream to either be picked up and connected, or
182 // for a timeout.
183 timeout := false
184 select {
185 case <-p.doneCh:
186 case <-time.After(5 * time.Second):
187 timeout = true
188 }
189
190 m.Lock()
191 defer m.Unlock()
192
193 // Delete the stream so no one else can grab it
194 delete(m.streams, id)
195
196 // If we timed out, then check if we have a channel in the buffer,
197 // and if so, close it.
198 if timeout {
199 select {
200 case s := <-p.ch:
201 s.Close()
202 default: // non-blocking: if Accept raced us, nothing is buffered to close
203 }
204 }
diff --git a/vendor/github.com/hashicorp/go-plugin/plugin.go b/vendor/github.com/hashicorp/go-plugin/plugin.go
new file mode 100644
index 0000000..37c8fd6
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/plugin.go
@@ -0,0 +1,25 @@
1// The plugin package exposes functions and helpers for communicating to
2// plugins which are implemented as standalone binary applications.
3//
4// plugin.Client fully manages the lifecycle of executing the application,
5// connecting to it, and returning the RPC client for dispensing plugins.
6//
7// plugin.Serve fully manages listeners to expose an RPC server from a binary
8// that plugin.Client can connect to.
9package plugin
10
11import (
12 "net/rpc"
13)
14
15// Plugin is the interface that is implemented to serve/connect to an
16// interface implementation.
17type Plugin interface {
18 // Server should return the RPC server compatible struct to serve
19 // the methods that the Client calls over net/rpc.
20 Server(*MuxBroker) (interface{}, error)
21
22 // Client returns an interface implementation for the plugin you're
23 // serving that communicates to the server end of the plugin.
24 Client(*MuxBroker, *rpc.Client) (interface{}, error)
25}
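
The canonical shape of a Plugin implementation is a pair of RPC wrappers around a domain interface. The following is a sketch under assumed names (Greeter, GreeterRPC, and so on are illustrative, not part of this package); note that the server half is what ends up registered under the "Plugin" name, which is why the client half calls "Plugin.Greet".

package example

import (
	"net/rpc"

	plugin "github.com/hashicorp/go-plugin"
)

// Greeter is the interface the host actually wants to call.
type Greeter interface {
	Greet() (string, error)
}

// GreeterRPC is the client half: it forwards calls over the rpc.Client
// that Plugin.Client receives.
type GreeterRPC struct{ client *rpc.Client }

func (g *GreeterRPC) Greet() (string, error) {
	var resp string
	err := g.client.Call("Plugin.Greet", new(interface{}), &resp)
	return resp, err
}

// GreeterRPCServer is the server half: it wraps a real implementation and
// is what Plugin.Server hands to net/rpc.
type GreeterRPCServer struct{ Impl Greeter }

func (s *GreeterRPCServer) Greet(args interface{}, resp *string) error {
	var err error
	*resp, err = s.Impl.Greet()
	return err
}

// GreeterPlugin ties the two halves together for go-plugin.
type GreeterPlugin struct{ Impl Greeter }

func (p *GreeterPlugin) Server(*plugin.MuxBroker) (interface{}, error) {
	return &GreeterRPCServer{Impl: p.Impl}, nil
}

func (p *GreeterPlugin) Client(b *plugin.MuxBroker, c *rpc.Client) (interface{}, error) {
	return &GreeterRPC{client: c}, nil
}
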
diff --git a/vendor/github.com/hashicorp/go-plugin/process.go b/vendor/github.com/hashicorp/go-plugin/process.go
new file mode 100644
index 0000000..88c999a
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/process.go
@@ -0,0 +1,24 @@
1package plugin
2
3import (
4 "time"
5)
6
7// pidAlive checks whether a pid is alive.
8func pidAlive(pid int) bool {
9 return _pidAlive(pid)
10}
11
12// pidWait blocks for a process to exit.
13func pidWait(pid int) error {
14 ticker := time.NewTicker(1 * time.Second)
15 defer ticker.Stop()
16
17 for range ticker.C {
18 if !pidAlive(pid) {
19 break
20 }
21 }
22
23 return nil
24}
diff --git a/vendor/github.com/hashicorp/go-plugin/process_posix.go b/vendor/github.com/hashicorp/go-plugin/process_posix.go
new file mode 100644
index 0000000..70ba546
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/process_posix.go
@@ -0,0 +1,19 @@
1// +build !windows
2
3package plugin
4
5import (
6 "os"
7 "syscall"
8)
9
10// _pidAlive tests whether a process is alive or not by sending it Signal 0,
11// since Go otherwise has no way to test this.
12func _pidAlive(pid int) bool {
13 proc, err := os.FindProcess(pid)
14 if err == nil {
15 err = proc.Signal(syscall.Signal(0))
16 }
17
18 return err == nil
19}
diff --git a/vendor/github.com/hashicorp/go-plugin/process_windows.go b/vendor/github.com/hashicorp/go-plugin/process_windows.go
new file mode 100644
index 0000000..9f7b018
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/process_windows.go
@@ -0,0 +1,29 @@
1package plugin
2
3import (
4 "syscall"
5)
6
7const (
8 // Weird name but matches the MSDN docs
9 exit_STILL_ACTIVE = 259
10
11 processDesiredAccess = syscall.STANDARD_RIGHTS_READ |
12 syscall.PROCESS_QUERY_INFORMATION |
13 syscall.SYNCHRONIZE
14)
15
16// _pidAlive tests whether a process is alive or not
17func _pidAlive(pid int) bool {
18 h, err := syscall.OpenProcess(processDesiredAccess, false, uint32(pid))
19 if err != nil {
20 return false
21 }
22
23 var ec uint32
24 if e := syscall.GetExitCodeProcess(h, &ec); e != nil {
25 return false
26 }
27
28 return ec == exit_STILL_ACTIVE
29}
diff --git a/vendor/github.com/hashicorp/go-plugin/rpc_client.go b/vendor/github.com/hashicorp/go-plugin/rpc_client.go
new file mode 100644
index 0000000..29f9bf0
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/rpc_client.go
@@ -0,0 +1,123 @@
1package plugin
2
3import (
4 "fmt"
5 "io"
6 "net"
7 "net/rpc"
8
9 "github.com/hashicorp/yamux"
10)
11
12// RPCClient connects to an RPCServer over net/rpc to dispense plugin types.
13type RPCClient struct {
14 broker *MuxBroker
15 control *rpc.Client
16 plugins map[string]Plugin
17
18 // These are the streams used for the various stdout/err overrides
19 stdout, stderr net.Conn
20}
21
22// NewRPCClient creates a client from an already-open connection-like value.
23// Dial is typically used instead.
24func NewRPCClient(conn io.ReadWriteCloser, plugins map[string]Plugin) (*RPCClient, error) {
25 // Create the yamux client so we can multiplex
26 mux, err := yamux.Client(conn, nil)
27 if err != nil {
28 conn.Close()
29 return nil, err
30 }
31
32 // Connect to the control stream.
33 control, err := mux.Open()
34 if err != nil {
35 mux.Close()
36 return nil, err
37 }
38
39 // Connect stdout, stderr streams
40 stdstream := make([]net.Conn, 2)
41 for i := range stdstream {
42 stdstream[i], err = mux.Open()
43 if err != nil {
44 mux.Close()
45 return nil, err
46 }
47 }
48
49 // Create the broker and start it up
50 broker := newMuxBroker(mux)
51 go broker.Run()
52
53 // Build the client using our broker and control channel.
54 return &RPCClient{
55 broker: broker,
56 control: rpc.NewClient(control),
57 plugins: plugins,
58 stdout: stdstream[0],
59 stderr: stdstream[1],
60 }, nil
61}
62
63// SyncStreams should be called to enable syncing of stdout,
64// stderr with the plugin.
65//
66// This will return immediately and the syncing will continue to happen
67// in the background. You do not need to launch this in a goroutine itself.
68//
69// This should never be called multiple times.
70func (c *RPCClient) SyncStreams(stdout io.Writer, stderr io.Writer) error {
71 go copyStream("stdout", stdout, c.stdout)
72 go copyStream("stderr", stderr, c.stderr)
73 return nil
74}
75
76// Close closes the connection. The client is no longer usable after this
77// is called.
78func (c *RPCClient) Close() error {
79 // Call the control channel and ask it to gracefully exit. If this
80 // errors, we save the error so that we still return it, but we
81 // try to close the other channels anyway.
82 var empty struct{}
83 returnErr := c.control.Call("Control.Quit", true, &empty)
84
85 // Close the other streams we have
86 if err := c.control.Close(); err != nil {
87 return err
88 }
89 if err := c.stdout.Close(); err != nil {
90 return err
91 }
92 if err := c.stderr.Close(); err != nil {
93 return err
94 }
95 if err := c.broker.Close(); err != nil {
96 return err
97 }
98
99 // Return back the error we got from Control.Quit. This is very important
100 // since we MUST return non-nil error if this fails so that Client.Kill
101 // will properly try a process.Kill.
102 return returnErr
103}
104
105func (c *RPCClient) Dispense(name string) (interface{}, error) {
106 p, ok := c.plugins[name]
107 if !ok {
108 return nil, fmt.Errorf("unknown plugin type: %s", name)
109 }
110
111 var id uint32
112 if err := c.control.Call(
113 "Dispenser.Dispense", name, &id); err != nil {
114 return nil, err
115 }
116
117 conn, err := c.broker.Dial(id)
118 if err != nil {
119 return nil, err
120 }
121
122 return p.Client(c.broker, rpc.NewClient(conn))
123}
diff --git a/vendor/github.com/hashicorp/go-plugin/rpc_server.go b/vendor/github.com/hashicorp/go-plugin/rpc_server.go
new file mode 100644
index 0000000..3984dc8
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/rpc_server.go
@@ -0,0 +1,185 @@
1package plugin
2
3import (
4 "errors"
5 "fmt"
6 "io"
7 "log"
8 "net"
9 "net/rpc"
10 "sync"
11
12 "github.com/hashicorp/yamux"
13)
14
15// RPCServer listens for network connections and then dispenses interface
16// implementations over net/rpc.
17//
18// After the fields below are set, they shouldn't be read again directly
19// from the structure, since the server may be reading/writing them concurrently.
20type RPCServer struct {
21 Plugins map[string]Plugin
22
23 // Stdout, Stderr are what this server will use instead of the
24 // normal stdout/stderr. Due to the multi-process nature of our
25 // plugin system, we can't use the normal process values, so we
26 // make our own custom ones and pipe them across.
27 Stdout io.Reader
28 Stderr io.Reader
29
30 // DoneCh should be set to a non-nil channel that will be closed
31 // when the control requests the RPC server to end.
32 DoneCh chan<- struct{}
33
34 lock sync.Mutex
35}
36
37// Accept accepts connections on a listener and serves requests for
38// each incoming connection. Accept blocks; the caller typically invokes
39// it in a go statement.
40func (s *RPCServer) Accept(lis net.Listener) {
41 for {
42 conn, err := lis.Accept()
43 if err != nil {
44 log.Printf("[ERR] plugin: plugin server: %s", err)
45 return
46 }
47
48 go s.ServeConn(conn)
49 }
50}
51
52// ServeConn runs a single connection.
53//
54// ServeConn blocks, serving the connection until the client hangs up.
55func (s *RPCServer) ServeConn(conn io.ReadWriteCloser) {
56 // First create the yamux server to wrap this connection
57 mux, err := yamux.Server(conn, nil)
58 if err != nil {
59 conn.Close()
60 log.Printf("[ERR] plugin: error creating yamux server: %s", err)
61 return
62 }
63
64 // Accept the control connection
65 control, err := mux.Accept()
66 if err != nil {
67 mux.Close()
68 if err != io.EOF {
69 log.Printf("[ERR] plugin: error accepting control connection: %s", err)
70 }
71
72 return
73 }
74
75 // Connect the stdstreams (in, out, err)
76 stdstream := make([]net.Conn, 2)
77 for i := range stdstream {
78 stdstream[i], err = mux.Accept()
79 if err != nil {
80 mux.Close()
81 log.Printf("[ERR] plugin: accepting stream %d: %s", i, err)
82 return
83 }
84 }
85
86 // Copy std streams out to the proper place
87 go copyStream("stdout", stdstream[0], s.Stdout)
88 go copyStream("stderr", stdstream[1], s.Stderr)
89
90 // Create the broker and start it up
91 broker := newMuxBroker(mux)
92 go broker.Run()
93
94 // Use the control connection to build the dispenser and serve the
95 // connection.
96 server := rpc.NewServer()
97 server.RegisterName("Control", &controlServer{
98 server: s,
99 })
100 server.RegisterName("Dispenser", &dispenseServer{
101 broker: broker,
102 plugins: s.Plugins,
103 })
104 server.ServeConn(control)
105}
106
107// done is called internally by the control server to close the
108// doneCh, which the main process listens on in order to exit
109// cleanly.
110func (s *RPCServer) done() {
111 s.lock.Lock()
112 defer s.lock.Unlock()
113
114 if s.DoneCh != nil {
115 close(s.DoneCh)
116 s.DoneCh = nil
117 }
118}
119
120// controlServer handles control RPCs from the client, such as Quit.
121type controlServer struct {
122 server *RPCServer
123}
124
125func (c *controlServer) Quit(
126 null bool, response *struct{}) error {
127 // End the server
128 c.server.done()
129
130 // Always set the empty response so the RPC completes cleanly
131 *response = struct{}{}
132
133 return nil
134}
135
136// dispenseServer dispenses various interface implementations for Terraform.
137type dispenseServer struct {
138 broker *MuxBroker
139 plugins map[string]Plugin
140}
141
142func (d *dispenseServer) Dispense(
143 name string, response *uint32) error {
144 // Find the function to create this implementation
145 p, ok := d.plugins[name]
146 if !ok {
147 return fmt.Errorf("unknown plugin type: %s", name)
148 }
149
150 // Create the implementation first so we know if there is an error.
151 impl, err := p.Server(d.broker)
152 if err != nil {
153 // We turn the error into an errors error so that it works across RPC
154 return errors.New(err.Error())
155 }
156
157 // Reserve an ID for our implementation
158 id := d.broker.NextId()
159 *response = id
160
161 // Run the rest in a goroutine since it can only happen once this RPC
162 // call returns. We wait for a connection for the plugin implementation
163 // and serve it.
164 go func() {
165 conn, err := d.broker.Accept(id)
166 if err != nil {
167 log.Printf("[ERR] go-plugin: plugin dispense error: %s: %s", name, err)
168 return
169 }
170
171 serve(conn, "Plugin", impl)
172 }()
173
174 return nil
175}
176
177func serve(conn io.ReadWriteCloser, name string, v interface{}) {
178 server := rpc.NewServer()
179 if err := server.RegisterName(name, v); err != nil {
180 log.Printf("[ERR] go-plugin: plugin dispense error: %s", err)
181 return
182 }
183
184 server.ServeConn(conn)
185}
diff --git a/vendor/github.com/hashicorp/go-plugin/server.go b/vendor/github.com/hashicorp/go-plugin/server.go
new file mode 100644
index 0000000..b5c5270
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/server.go
@@ -0,0 +1,222 @@
1package plugin
2
3import (
4 "errors"
5 "fmt"
6 "io/ioutil"
7 "log"
8 "net"
9 "os"
10 "os/signal"
11 "runtime"
12 "strconv"
13 "sync/atomic"
14)
15
16// CoreProtocolVersion is the ProtocolVersion of the plugin system itself.
17// We will increment this whenever we change any protocol behavior. This
18// will invalidate any prior plugins but will at least allow us to iterate
19// on the core in a safe way. We will do our best to do this very
20// infrequently.
21const CoreProtocolVersion = 1
22
23// HandshakeConfig is the configuration used by client and servers to
24// handshake before starting a plugin connection. This is embedded by
25// both ServeConfig and ClientConfig.
26//
27// In practice, the plugin host creates a HandshakeConfig that is exported
28// and plugins then can easily consume it.
29type HandshakeConfig struct {
30 // ProtocolVersion is the version that clients must match on to
31 // agree they can communicate. This should match the ProtocolVersion
32 // set on ClientConfig when using a plugin.
33 ProtocolVersion uint
34
35 // MagicCookieKey and value are used as a very basic verification
36 // that a plugin is intended to be launched. This is not a security
37 // measure, just a UX feature. If the magic cookie doesn't match,
38 // we show human-friendly output.
39 MagicCookieKey string
40 MagicCookieValue string
41}
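
A sketch of the usual pattern, with hypothetical cookie values: define the handshake once in a package shared by host and plugins, so both ClientConfig and ServeConfig embed the same value.

package example

import plugin "github.com/hashicorp/go-plugin"

// Handshake is a hypothetical shared value that both the host and every
// plugin binary import. The cookie is a UX check, not a security measure.
var Handshake = plugin.HandshakeConfig{
	ProtocolVersion:  1,
	MagicCookieKey:   "EXAMPLE_PLUGIN",
	MagicCookieValue: "hello",
}
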
42
43// ServeConfig configures what sorts of plugins are served.
44type ServeConfig struct {
45 // HandshakeConfig is the configuration that must match clients.
46 HandshakeConfig
47
48 // Plugins are the plugins that are served.
49 Plugins map[string]Plugin
50}
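
A minimal sketch of a plugin binary's main, with a hypothetical stub standing in for a real Plugin implementation and illustrative cookie values that would have to match the host:

package main

import (
	"net/rpc"

	plugin "github.com/hashicorp/go-plugin"
)

// stubPlugin stands in for a real Plugin implementation (see the Greeter
// sketch near plugin.go earlier in this diff).
type stubPlugin struct{}

func (stubPlugin) Server(*plugin.MuxBroker) (interface{}, error) { return struct{}{}, nil }
func (stubPlugin) Client(*plugin.MuxBroker, *rpc.Client) (interface{}, error) {
	return struct{}{}, nil
}

func main() {
	// Serve blocks until the host asks the plugin process to quit.
	plugin.Serve(&plugin.ServeConfig{
		HandshakeConfig: plugin.HandshakeConfig{
			ProtocolVersion:  1,
			MagicCookieKey:   "EXAMPLE_PLUGIN", // hypothetical; must match the host
			MagicCookieValue: "hello",
		},
		Plugins: map[string]plugin.Plugin{"greeter": stubPlugin{}},
	})
}
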
51
52// Serve serves the plugins given by ServeConfig.
53//
54// Serve doesn't return until the plugin is done being executed. Any
55// errors will be outputted to the log.
56//
57// This is the method that plugins should call in their main() functions.
58func Serve(opts *ServeConfig) {
59 // Validate the handshake config
60 if opts.MagicCookieKey == "" || opts.MagicCookieValue == "" {
61 fmt.Fprintf(os.Stderr,
62 "Misconfigured ServeConfig given to serve this plugin: no magic cookie\n"+
63 "key or value was set. Please notify the plugin author and report\n"+
64 "this as a bug.\n")
65 os.Exit(1)
66 }
67
68 // First check the cookie
69 if os.Getenv(opts.MagicCookieKey) != opts.MagicCookieValue {
70 fmt.Fprintf(os.Stderr,
71 "This binary is a plugin. These are not meant to be executed directly.\n"+
72 "Please execute the program that consumes these plugins, which will\n"+
73 "load any plugins automatically\n")
74 os.Exit(1)
75 }
76
77 // Logging goes to the original stderr
78 log.SetOutput(os.Stderr)
79
80 // Create our new stdout, stderr files. These will override our built-in
81 // stdout/stderr so that it works across the stream boundary.
82 stdout_r, stdout_w, err := os.Pipe()
83 if err != nil {
84 fmt.Fprintf(os.Stderr, "Error preparing plugin: %s\n", err)
85 os.Exit(1)
86 }
87 stderr_r, stderr_w, err := os.Pipe()
88 if err != nil {
89 fmt.Fprintf(os.Stderr, "Error preparing plugin: %s\n", err)
90 os.Exit(1)
91 }
92
93 // Register a listener so we can accept a connection
94 listener, err := serverListener()
95 if err != nil {
96 log.Printf("[ERR] plugin: plugin init: %s", err)
97 return
98 }
99 defer listener.Close()
100
101 // Create the channel to tell us when we're done
102 doneCh := make(chan struct{})
103
104 // Create the RPC server to dispense
105 server := &RPCServer{
106 Plugins: opts.Plugins,
107 Stdout: stdout_r,
108 Stderr: stderr_r,
109 DoneCh: doneCh,
110 }
111
112 // Output the handshake line to stdout so that the host process can connect.
113 log.Printf("[DEBUG] plugin: plugin address: %s %s\n",
114 listener.Addr().Network(), listener.Addr().String())
115 fmt.Printf("%d|%d|%s|%s\n",
116 CoreProtocolVersion,
117 opts.ProtocolVersion,
118 listener.Addr().Network(),
119 listener.Addr().String())
120 os.Stdout.Sync()
121
122 // Eat the interrupts
123 ch := make(chan os.Signal, 1)
124 signal.Notify(ch, os.Interrupt)
125 go func() {
126 var count int32 = 0
127 for {
128 <-ch
129 newCount := atomic.AddInt32(&count, 1)
130 log.Printf(
131 "[DEBUG] plugin: received interrupt signal (count: %d). Ignoring.",
132 newCount)
133 }
134 }()
135
136 // Set our new out, err
137 os.Stdout = stdout_w
138 os.Stderr = stderr_w
139
140 // Serve
141 go server.Accept(listener)
142
143 // Wait for the graceful exit
144 <-doneCh
145}
146
147func serverListener() (net.Listener, error) {
148 if runtime.GOOS == "windows" {
149 return serverListener_tcp()
150 }
151
152 return serverListener_unix()
153}
154
155func serverListener_tcp() (net.Listener, error) {
156 minPort, err := strconv.ParseInt(os.Getenv("PLUGIN_MIN_PORT"), 10, 32)
157 if err != nil {
158 return nil, err
159 }
160
161 maxPort, err := strconv.ParseInt(os.Getenv("PLUGIN_MAX_PORT"), 10, 32)
162 if err != nil {
163 return nil, err
164 }
165
166 for port := minPort; port <= maxPort; port++ {
167 address := fmt.Sprintf("127.0.0.1:%d", port)
168 listener, err := net.Listen("tcp", address)
169 if err == nil {
170 return listener, nil
171 }
172 }
173
174 return nil, errors.New("Couldn't bind plugin TCP listener")
175}
176
177func serverListener_unix() (net.Listener, error) {
178 tf, err := ioutil.TempFile("", "plugin")
179 if err != nil {
180 return nil, err
181 }
182 path := tf.Name()
183
184 // Close the file and remove it, because the path must not already
185 // exist when we bind the Unix domain socket.
186 if err := tf.Close(); err != nil {
187 return nil, err
188 }
189 if err := os.Remove(path); err != nil {
190 return nil, err
191 }
192
193 l, err := net.Listen("unix", path)
194 if err != nil {
195 return nil, err
196 }
197
198 // Wrap the listener in rmListener so that the Unix domain socket file
199 // is removed on close.
200 return &rmListener{
201 Listener: l,
202 Path: path,
203 }, nil
204}
205
206// rmListener is an implementation of net.Listener that forwards most
207// calls to the listener but also removes a file as part of the close. We
208// use this to clean up the Unix domain socket file on close.
209type rmListener struct {
210 net.Listener
211 Path string
212}
213
214func (l *rmListener) Close() error {
215 // Close the listener itself
216 if err := l.Listener.Close(); err != nil {
217 return err
218 }
219
220 // Remove the file
221 return os.Remove(l.Path)
222}
diff --git a/vendor/github.com/hashicorp/go-plugin/server_mux.go b/vendor/github.com/hashicorp/go-plugin/server_mux.go
new file mode 100644
index 0000000..033079e
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/server_mux.go
@@ -0,0 +1,31 @@
1package plugin
2
3import (
4 "fmt"
5 "os"
6)
7
8// ServeMuxMap is the type that is used to configure ServeMux
9type ServeMuxMap map[string]*ServeConfig
10
11// ServeMux is like Serve, but serves multiple types of plugins determined
12// by the argument given on the command-line.
13//
14// This function doesn't return until the plugin is done being executed. Any
15// errors are logged or output to stderr.
16func ServeMux(m ServeMuxMap) {
17 if len(os.Args) != 2 {
18 fmt.Fprintf(os.Stderr,
19 "Invoked improperly. This is an internal command that shouldn't\n"+
20 "be manually invoked.\n")
21 os.Exit(1)
22 }
23
24 opts, ok := m[os.Args[1]]
25 if !ok {
26 fmt.Fprintf(os.Stderr, "Unknown plugin: %s\n", os.Args[1])
27 os.Exit(1)
28 }
29
30 Serve(opts)
31}
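
A hedged sketch, with greeterConfig and loggerConfig as hypothetical, fully populated ServeConfig values defined elsewhere:

package main

import plugin "github.com/hashicorp/go-plugin"

// greeterConfig and loggerConfig are hypothetical configurations; each
// would be a fully populated *plugin.ServeConfig in a real binary.
var greeterConfig, loggerConfig *plugin.ServeConfig

func main() {
	// The binary is invoked as "binary greeter" or "binary logger",
	// and ServeMux picks the matching ServeConfig from os.Args[1].
	plugin.ServeMux(plugin.ServeMuxMap{
		"greeter": greeterConfig,
		"logger":  loggerConfig,
	})
}
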
diff --git a/vendor/github.com/hashicorp/go-plugin/stream.go b/vendor/github.com/hashicorp/go-plugin/stream.go
new file mode 100644
index 0000000..1d547aa
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/stream.go
@@ -0,0 +1,18 @@
1package plugin
2
3import (
4 "io"
5 "log"
6)
7
8func copyStream(name string, dst io.Writer, src io.Reader) {
9 if src == nil {
10 panic(name + ": src is nil")
11 }
12 if dst == nil {
13 panic(name + ": dst is nil")
14 }
15 if _, err := io.Copy(dst, src); err != nil && err != io.EOF {
16 log.Printf("[ERR] plugin: stream copy '%s' error: %s", name, err)
17 }
18}
diff --git a/vendor/github.com/hashicorp/go-plugin/testing.go b/vendor/github.com/hashicorp/go-plugin/testing.go
new file mode 100644
index 0000000..9086a1b
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/testing.go
@@ -0,0 +1,76 @@
1package plugin
2
3import (
4 "bytes"
5 "net"
6 "net/rpc"
7 "testing"
8)
9
10// This file contains test helpers that you can use outside of this
11// package to make it easier to test plugins themselves.
12
13// TestConn is a helper function for returning a client and server
14// net.Conn connected to each other.
15func TestConn(t *testing.T) (net.Conn, net.Conn) {
16 // Listen to any local port. This listener will be closed
17 // after a single connection is established.
18 l, err := net.Listen("tcp", "127.0.0.1:0")
19 if err != nil {
20 t.Fatalf("err: %s", err)
21 }
22
23 // Start a goroutine to accept our client connection
24 var serverConn net.Conn
25 doneCh := make(chan struct{})
26 go func() {
27 defer close(doneCh)
28 defer l.Close()
29 var err error
30 serverConn, err = l.Accept()
31 if err != nil {
32 t.Fatalf("err: %s", err)
33 }
34 }()
35
36 // Connect to the server
37 clientConn, err := net.Dial("tcp", l.Addr().String())
38 if err != nil {
39 t.Fatalf("err: %s", err)
40 }
41
42 // Wait for the server side to acknowledge it has connected
43 <-doneCh
44
45 return clientConn, serverConn
46}
47
48// TestRPCConn returns an rpc client and server connected to each other.
49func TestRPCConn(t *testing.T) (*rpc.Client, *rpc.Server) {
50 clientConn, serverConn := TestConn(t)
51
52 server := rpc.NewServer()
53 go server.ServeConn(serverConn)
54
55 client := rpc.NewClient(clientConn)
56 return client, server
57}
58
59// TestPluginRPCConn returns a plugin RPC client and server that are connected
60// together and configured.
61func TestPluginRPCConn(t *testing.T, ps map[string]Plugin) (*RPCClient, *RPCServer) {
62 // Create two net.Conns we can use to shuttle our control connection
63 clientConn, serverConn := TestConn(t)
64
65 // Start up the server
66 server := &RPCServer{Plugins: ps, Stdout: new(bytes.Buffer), Stderr: new(bytes.Buffer)}
67 go server.ServeConn(serverConn)
68
69 // Connect the client to the server
70 client, err := NewRPCClient(clientConn, ps)
71 if err != nil {
72 t.Fatalf("err: %s", err)
73 }
74
75 return client, server
76}
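
A sketch of how this helper might be used in a test, building on the hypothetical Greeter types sketched near plugin.go earlier in this diff:

package example

import (
	"testing"

	plugin "github.com/hashicorp/go-plugin"
)

// TestGreeterDispense exercises Dispense over an in-process connection,
// without launching a subprocess.
func TestGreeterDispense(t *testing.T) {
	pluginMap := map[string]plugin.Plugin{
		"greeter": &GreeterPlugin{Impl: staticGreeter{}},
	}
	client, _ := plugin.TestPluginRPCConn(t, pluginMap)
	defer client.Close()

	raw, err := client.Dispense("greeter")
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	if _, ok := raw.(Greeter); !ok {
		t.Fatalf("unexpected type: %T", raw)
	}
}

// staticGreeter is a trivial in-test implementation.
type staticGreeter struct{}

func (staticGreeter) Greet() (string, error) { return "hello", nil }
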
diff --git a/vendor/github.com/hashicorp/go-uuid/LICENSE b/vendor/github.com/hashicorp/go-uuid/LICENSE
new file mode 100644
index 0000000..e87a115
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-uuid/LICENSE
@@ -0,0 +1,363 @@
1Mozilla Public License, version 2.0
2
31. Definitions
4
51.1. "Contributor"
6
7 means each individual or legal entity that creates, contributes to the
8 creation of, or owns Covered Software.
9
101.2. "Contributor Version"
11
12 means the combination of the Contributions of others (if any) used by a
13 Contributor and that particular Contributor's Contribution.
14
151.3. "Contribution"
16
17 means Covered Software of a particular Contributor.
18
191.4. "Covered Software"
20
21 means Source Code Form to which the initial Contributor has attached the
22 notice in Exhibit A, the Executable Form of such Source Code Form, and
23 Modifications of such Source Code Form, in each case including portions
24 thereof.
25
261.5. "Incompatible With Secondary Licenses"
27 means
28
29 a. that the initial Contributor has attached the notice described in
30 Exhibit B to the Covered Software; or
31
32 b. that the Covered Software was made available under the terms of
33 version 1.1 or earlier of the License, but not also under the terms of
34 a Secondary License.
35
361.6. "Executable Form"
37
38 means any form of the work other than Source Code Form.
39
401.7. "Larger Work"
41
42 means a work that combines Covered Software with other material, in a
43 separate file or files, that is not Covered Software.
44
451.8. "License"
46
47 means this document.
48
491.9. "Licensable"
50
51 means having the right to grant, to the maximum extent possible, whether
52 at the time of the initial grant or subsequently, any and all of the
53 rights conveyed by this License.
54
551.10. "Modifications"
56
57 means any of the following:
58
59 a. any file in Source Code Form that results from an addition to,
60 deletion from, or modification of the contents of Covered Software; or
61
62 b. any new file in Source Code Form that contains any Covered Software.
63
641.11. "Patent Claims" of a Contributor
65
66 means any patent claim(s), including without limitation, method,
67 process, and apparatus claims, in any patent Licensable by such
68 Contributor that would be infringed, but for the grant of the License,
69 by the making, using, selling, offering for sale, having made, import,
70 or transfer of either its Contributions or its Contributor Version.
71
721.12. "Secondary License"
73
74 means either the GNU General Public License, Version 2.0, the GNU Lesser
75 General Public License, Version 2.1, the GNU Affero General Public
76 License, Version 3.0, or any later versions of those licenses.
77
781.13. "Source Code Form"
79
80 means the form of the work preferred for making modifications.
81
821.14. "You" (or "Your")
83
84 means an individual or a legal entity exercising rights under this
85 License. For legal entities, "You" includes any entity that controls, is
86 controlled by, or is under common control with You. For purposes of this
87 definition, "control" means (a) the power, direct or indirect, to cause
88 the direction or management of such entity, whether by contract or
89 otherwise, or (b) ownership of more than fifty percent (50%) of the
90 outstanding shares or beneficial ownership of such entity.
91
92
932. License Grants and Conditions
94
952.1. Grants
96
97 Each Contributor hereby grants You a world-wide, royalty-free,
98 non-exclusive license:
99
100 a. under intellectual property rights (other than patent or trademark)
101 Licensable by such Contributor to use, reproduce, make available,
102 modify, display, perform, distribute, and otherwise exploit its
103 Contributions, either on an unmodified basis, with Modifications, or
104 as part of a Larger Work; and
105
106 b. under Patent Claims of such Contributor to make, use, sell, offer for
107 sale, have made, import, and otherwise transfer either its
108 Contributions or its Contributor Version.
109
1102.2. Effective Date
111
112 The licenses granted in Section 2.1 with respect to any Contribution
113 become effective for each Contribution on the date the Contributor first
114 distributes such Contribution.
115
1162.3. Limitations on Grant Scope
117
118 The licenses granted in this Section 2 are the only rights granted under
119 this License. No additional rights or licenses will be implied from the
120 distribution or licensing of Covered Software under this License.
121 Notwithstanding Section 2.1(b) above, no patent license is granted by a
122 Contributor:
123
124 a. for any code that a Contributor has removed from Covered Software; or
125
126 b. for infringements caused by: (i) Your and any other third party's
127 modifications of Covered Software, or (ii) the combination of its
128 Contributions with other software (except as part of its Contributor
129 Version); or
130
131 c. under Patent Claims infringed by Covered Software in the absence of
132 its Contributions.
133
134 This License does not grant any rights in the trademarks, service marks,
135 or logos of any Contributor (except as may be necessary to comply with
136 the notice requirements in Section 3.4).
137
1382.4. Subsequent Licenses
139
140 No Contributor makes additional grants as a result of Your choice to
141 distribute the Covered Software under a subsequent version of this
142 License (see Section 10.2) or under the terms of a Secondary License (if
143 permitted under the terms of Section 3.3).
144
1452.5. Representation
146
147 Each Contributor represents that the Contributor believes its
148 Contributions are its original creation(s) or it has sufficient rights to
149 grant the rights to its Contributions conveyed by this License.
150
1512.6. Fair Use
152
153 This License is not intended to limit any rights You have under
154 applicable copyright doctrines of fair use, fair dealing, or other
155 equivalents.
156
1572.7. Conditions
158
159 Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
160 Section 2.1.
161
162
1633. Responsibilities
164
1653.1. Distribution of Source Form
166
167 All distribution of Covered Software in Source Code Form, including any
168 Modifications that You create or to which You contribute, must be under
169 the terms of this License. You must inform recipients that the Source
170 Code Form of the Covered Software is governed by the terms of this
171 License, and how they can obtain a copy of this License. You may not
172 attempt to alter or restrict the recipients' rights in the Source Code
173 Form.
174
1753.2. Distribution of Executable Form
176
177 If You distribute Covered Software in Executable Form then:
178
179 a. such Covered Software must also be made available in Source Code Form,
180 as described in Section 3.1, and You must inform recipients of the
181 Executable Form how they can obtain a copy of such Source Code Form by
182 reasonable means in a timely manner, at a charge no more than the cost
183 of distribution to the recipient; and
184
185 b. You may distribute such Executable Form under the terms of this
186 License, or sublicense it under different terms, provided that the
187 license for the Executable Form does not attempt to limit or alter the
188 recipients' rights in the Source Code Form under this License.
189
1903.3. Distribution of a Larger Work
191
192 You may create and distribute a Larger Work under terms of Your choice,
193 provided that You also comply with the requirements of this License for
194 the Covered Software. If the Larger Work is a combination of Covered
195 Software with a work governed by one or more Secondary Licenses, and the
196 Covered Software is not Incompatible With Secondary Licenses, this
197 License permits You to additionally distribute such Covered Software
198 under the terms of such Secondary License(s), so that the recipient of
199 the Larger Work may, at their option, further distribute the Covered
200 Software under the terms of either this License or such Secondary
201 License(s).
202
2033.4. Notices
204
205 You may not remove or alter the substance of any license notices
206 (including copyright notices, patent notices, disclaimers of warranty, or
207 limitations of liability) contained within the Source Code Form of the
208 Covered Software, except that You may alter any license notices to the
209 extent required to remedy known factual inaccuracies.
210
2113.5. Application of Additional Terms
212
213 You may choose to offer, and to charge a fee for, warranty, support,
214 indemnity or liability obligations to one or more recipients of Covered
215 Software. However, You may do so only on Your own behalf, and not on
216 behalf of any Contributor. You must make it absolutely clear that any
217 such warranty, support, indemnity, or liability obligation is offered by
218 You alone, and You hereby agree to indemnify every Contributor for any
219 liability incurred by such Contributor as a result of warranty, support,
220 indemnity or liability terms You offer. You may include additional
221 disclaimers of warranty and limitations of liability specific to any
222 jurisdiction.
223
2244. Inability to Comply Due to Statute or Regulation
225
226 If it is impossible for You to comply with any of the terms of this License
227 with respect to some or all of the Covered Software due to statute,
228 judicial order, or regulation then You must: (a) comply with the terms of
229 this License to the maximum extent possible; and (b) describe the
230 limitations and the code they affect. Such description must be placed in a
231 text file included with all distributions of the Covered Software under
232 this License. Except to the extent prohibited by statute or regulation,
233 such description must be sufficiently detailed for a recipient of ordinary
234 skill to be able to understand it.
235
2365. Termination
237
2385.1. The rights granted under this License will terminate automatically if You
239 fail to comply with any of its terms. However, if You become compliant,
240 then the rights granted under this License from a particular Contributor
241 are reinstated (a) provisionally, unless and until such Contributor
242 explicitly and finally terminates Your grants, and (b) on an ongoing
243 basis, if such Contributor fails to notify You of the non-compliance by
244 some reasonable means prior to 60 days after You have come back into
245 compliance. Moreover, Your grants from a particular Contributor are
246 reinstated on an ongoing basis if such Contributor notifies You of the
247 non-compliance by some reasonable means, this is the first time You have
248 received notice of non-compliance with this License from such
249 Contributor, and You become compliant prior to 30 days after Your receipt
250 of the notice.
251
2525.2. If You initiate litigation against any entity by asserting a patent
253 infringement claim (excluding declaratory judgment actions,
254 counter-claims, and cross-claims) alleging that a Contributor Version
255 directly or indirectly infringes any patent, then the rights granted to
256 You by any and all Contributors for the Covered Software under Section
257 2.1 of this License shall terminate.
258
2595.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
260 license agreements (excluding distributors and resellers) which have been
261 validly granted by You or Your distributors under this License prior to
262 termination shall survive termination.
263
2646. Disclaimer of Warranty
265
266 Covered Software is provided under this License on an "as is" basis,
267 without warranty of any kind, either expressed, implied, or statutory,
268 including, without limitation, warranties that the Covered Software is free
269 of defects, merchantable, fit for a particular purpose or non-infringing.
270 The entire risk as to the quality and performance of the Covered Software
271 is with You. Should any Covered Software prove defective in any respect,
272 You (not any Contributor) assume the cost of any necessary servicing,
273 repair, or correction. This disclaimer of warranty constitutes an essential
274 part of this License. No use of any Covered Software is authorized under
275 this License except under this disclaimer.
276
2777. Limitation of Liability
278
279 Under no circumstances and under no legal theory, whether tort (including
280 negligence), contract, or otherwise, shall any Contributor, or anyone who
281 distributes Covered Software as permitted above, be liable to You for any
282 direct, indirect, special, incidental, or consequential damages of any
283 character including, without limitation, damages for lost profits, loss of
284 goodwill, work stoppage, computer failure or malfunction, or any and all
285 other commercial damages or losses, even if such party shall have been
286 informed of the possibility of such damages. This limitation of liability
287 shall not apply to liability for death or personal injury resulting from
288 such party's negligence to the extent applicable law prohibits such
289 limitation. Some jurisdictions do not allow the exclusion or limitation of
290 incidental or consequential damages, so this exclusion and limitation may
291 not apply to You.
292
2938. Litigation
294
295 Any litigation relating to this License may be brought only in the courts
296 of a jurisdiction where the defendant maintains its principal place of
297 business and such litigation shall be governed by laws of that
298 jurisdiction, without reference to its conflict-of-law provisions. Nothing
299 in this Section shall prevent a party's ability to bring cross-claims or
300 counter-claims.
301
3029. Miscellaneous
303
304 This License represents the complete agreement concerning the subject
305 matter hereof. If any provision of this License is held to be
306 unenforceable, such provision shall be reformed only to the extent
307 necessary to make it enforceable. Any law or regulation which provides that
308 the language of a contract shall be construed against the drafter shall not
309 be used to construe this License against a Contributor.
310
311
31210. Versions of the License
313
31410.1. New Versions
315
316 Mozilla Foundation is the license steward. Except as provided in Section
317 10.3, no one other than the license steward has the right to modify or
318 publish new versions of this License. Each version will be given a
319 distinguishing version number.
320
32110.2. Effect of New Versions
322
323 You may distribute the Covered Software under the terms of the version
324 of the License under which You originally received the Covered Software,
325 or under the terms of any subsequent version published by the license
326 steward.
327
32810.3. Modified Versions
329
330 If you create software not governed by this License, and you want to
331 create a new license for such software, you may create and use a
332 modified version of this License if you rename the license and remove
333 any references to the name of the license steward (except to note that
334 such modified license differs from this License).
335
33610.4. Distributing Source Code Form that is Incompatible With Secondary
337 Licenses If You choose to distribute Source Code Form that is
338 Incompatible With Secondary Licenses under the terms of this version of
339 the License, the notice described in Exhibit B of this License must be
340 attached.
341
342Exhibit A - Source Code Form License Notice
343
344 This Source Code Form is subject to the
345 terms of the Mozilla Public License, v.
346 2.0. If a copy of the MPL was not
347 distributed with this file, You can
348 obtain one at
349 http://mozilla.org/MPL/2.0/.
350
351If it is not possible or desirable to put the notice in a particular file,
352then You may include the notice in a location (such as a LICENSE file in a
353relevant directory) where a recipient would be likely to look for such a
354notice.
355
356You may add additional accurate notices of copyright ownership.
357
358Exhibit B - "Incompatible With Secondary Licenses" Notice
359
360 This Source Code Form is "Incompatible
361 With Secondary Licenses", as defined by
362 the Mozilla Public License, v. 2.0.
363
diff --git a/vendor/github.com/hashicorp/go-uuid/README.md b/vendor/github.com/hashicorp/go-uuid/README.md
new file mode 100644
index 0000000..21fdda4
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-uuid/README.md
@@ -0,0 +1,8 @@
1# uuid
2
3Generates UUID-format strings using high-quality random bytes.
4
5Documentation
6=============
7
8The full documentation is available on [Godoc](http://godoc.org/github.com/hashicorp/go-uuid).
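
A minimal usage sketch (the import path and the three exported functions match uuid.go below; the `main` wrapper and `log`-based error handling are illustrative): generate an ID, then round-trip it through `ParseUUID` and `FormatUUID`.

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/go-uuid"
)

func main() {
	// GenerateUUID reads 16 bytes from crypto/rand and formats them.
	id, err := uuid.GenerateUUID()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(id)

	// ParseUUID recovers the raw 16 bytes; FormatUUID re-creates the string.
	raw, err := uuid.ParseUUID(id)
	if err != nil {
		log.Fatal(err)
	}
	again, err := uuid.FormatUUID(raw)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(again == id) // true
}
```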
diff --git a/vendor/github.com/hashicorp/go-uuid/uuid.go b/vendor/github.com/hashicorp/go-uuid/uuid.go
new file mode 100644
index 0000000..322b522
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-uuid/uuid.go
@@ -0,0 +1,57 @@
1package uuid
2
3import (
4 "crypto/rand"
5 "encoding/hex"
6 "fmt"
7)
8
9// GenerateUUID is used to generate a random UUID
10func GenerateUUID() (string, error) {
11 buf := make([]byte, 16)
12 if _, err := rand.Read(buf); err != nil {
13 return "", fmt.Errorf("failed to read random bytes: %v", err)
14 }
15
16 return FormatUUID(buf)
17}
18
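// FormatUUID formats a 16-byte slice as a canonical, hyphenated UUID string.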
19func FormatUUID(buf []byte) (string, error) {
20 if len(buf) != 16 {
21 return "", fmt.Errorf("wrong length byte slice (%d)", len(buf))
22 }
23
24 return fmt.Sprintf("%08x-%04x-%04x-%04x-%12x",
25 buf[0:4],
26 buf[4:6],
27 buf[6:8],
28 buf[8:10],
29 buf[10:16]), nil
30}
31
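// ParseUUID parses a canonical, hyphenated UUID string back into its raw 16 bytes.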
32func ParseUUID(uuid string) ([]byte, error) {
33 if len(uuid) != 36 {
34 return nil, fmt.Errorf("uuid string is wrong length")
35 }
36
37 hyph := []byte("-")
38
39 if uuid[8] != hyph[0] ||
40 uuid[13] != hyph[0] ||
41 uuid[18] != hyph[0] ||
42 uuid[23] != hyph[0] {
43 return nil, fmt.Errorf("uuid is improperly formatted")
44 }
45
46 hexStr := uuid[0:8] + uuid[9:13] + uuid[14:18] + uuid[19:23] + uuid[24:36]
47
48 ret, err := hex.DecodeString(hexStr)
49 if err != nil {
50 return nil, err
51 }
52 if len(ret) != 16 {
53 return nil, fmt.Errorf("decoded hex is the wrong length")
54 }
55
56 return ret, nil
57}
diff --git a/vendor/github.com/hashicorp/go-version/LICENSE b/vendor/github.com/hashicorp/go-version/LICENSE
new file mode 100644
index 0000000..c33dcc7
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-version/LICENSE
@@ -0,0 +1,354 @@
1Mozilla Public License, version 2.0
2
31. Definitions
4
51.1. “Contributor”
6
7 means each individual or legal entity that creates, contributes to the
8 creation of, or owns Covered Software.
9
101.2. “Contributor Version”
11
12 means the combination of the Contributions of others (if any) used by a
13 Contributor and that particular Contributor’s Contribution.
14
151.3. “Contribution”
16
17 means Covered Software of a particular Contributor.
18
191.4. “Covered Software”
20
21 means Source Code Form to which the initial Contributor has attached the
22 notice in Exhibit A, the Executable Form of such Source Code Form, and
23 Modifications of such Source Code Form, in each case including portions
24 thereof.
25
261.5. “Incompatible With Secondary Licenses”
27 means
28
29 a. that the initial Contributor has attached the notice described in
30 Exhibit B to the Covered Software; or
31
32 b. that the Covered Software was made available under the terms of version
33 1.1 or earlier of the License, but not also under the terms of a
34 Secondary License.
35
361.6. “Executable Form”
37
38 means any form of the work other than Source Code Form.
39
401.7. “Larger Work”
41
42 means a work that combines Covered Software with other material, in a separate
43 file or files, that is not Covered Software.
44
451.8. “License”
46
47 means this document.
48
491.9. “Licensable”
50
51 means having the right to grant, to the maximum extent possible, whether at the
52 time of the initial grant or subsequently, any and all of the rights conveyed by
53 this License.
54
551.10. “Modifications”
56
57 means any of the following:
58
59 a. any file in Source Code Form that results from an addition to, deletion
60 from, or modification of the contents of Covered Software; or
61
62 b. any new file in Source Code Form that contains any Covered Software.
63
641.11. “Patent Claims” of a Contributor
65
66 means any patent claim(s), including without limitation, method, process,
67 and apparatus claims, in any patent Licensable by such Contributor that
68 would be infringed, but for the grant of the License, by the making,
69 using, selling, offering for sale, having made, import, or transfer of
70 either its Contributions or its Contributor Version.
71
721.12. “Secondary License”
73
74 means either the GNU General Public License, Version 2.0, the GNU Lesser
75 General Public License, Version 2.1, the GNU Affero General Public
76 License, Version 3.0, or any later versions of those licenses.
77
781.13. “Source Code Form”
79
80 means the form of the work preferred for making modifications.
81
821.14. “You” (or “Your”)
83
84 means an individual or a legal entity exercising rights under this
85 License. For legal entities, “You” includes any entity that controls, is
86 controlled by, or is under common control with You. For purposes of this
87 definition, “control” means (a) the power, direct or indirect, to cause
88 the direction or management of such entity, whether by contract or
89 otherwise, or (b) ownership of more than fifty percent (50%) of the
90 outstanding shares or beneficial ownership of such entity.
91
92
932. License Grants and Conditions
94
952.1. Grants
96
97 Each Contributor hereby grants You a world-wide, royalty-free,
98 non-exclusive license:
99
100 a. under intellectual property rights (other than patent or trademark)
101 Licensable by such Contributor to use, reproduce, make available,
102 modify, display, perform, distribute, and otherwise exploit its
103 Contributions, either on an unmodified basis, with Modifications, or as
104 part of a Larger Work; and
105
106 b. under Patent Claims of such Contributor to make, use, sell, offer for
107 sale, have made, import, and otherwise transfer either its Contributions
108 or its Contributor Version.
109
1102.2. Effective Date
111
112 The licenses granted in Section 2.1 with respect to any Contribution become
113 effective for each Contribution on the date the Contributor first distributes
114 such Contribution.
115
1162.3. Limitations on Grant Scope
117
118 The licenses granted in this Section 2 are the only rights granted under this
119 License. No additional rights or licenses will be implied from the distribution
120 or licensing of Covered Software under this License. Notwithstanding Section
121 2.1(b) above, no patent license is granted by a Contributor:
122
123 a. for any code that a Contributor has removed from Covered Software; or
124
125 b. for infringements caused by: (i) Your and any other third party’s
126 modifications of Covered Software, or (ii) the combination of its
127 Contributions with other software (except as part of its Contributor
128 Version); or
129
130 c. under Patent Claims infringed by Covered Software in the absence of its
131 Contributions.
132
133 This License does not grant any rights in the trademarks, service marks, or
134 logos of any Contributor (except as may be necessary to comply with the
135 notice requirements in Section 3.4).
136
1372.4. Subsequent Licenses
138
139 No Contributor makes additional grants as a result of Your choice to
140 distribute the Covered Software under a subsequent version of this License
141 (see Section 10.2) or under the terms of a Secondary License (if permitted
142 under the terms of Section 3.3).
143
1442.5. Representation
145
146 Each Contributor represents that the Contributor believes its Contributions
147 are its original creation(s) or it has sufficient rights to grant the
148 rights to its Contributions conveyed by this License.
149
1502.6. Fair Use
151
152 This License is not intended to limit any rights You have under applicable
153 copyright doctrines of fair use, fair dealing, or other equivalents.
154
1552.7. Conditions
156
157 Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
158 Section 2.1.
159
160
1613. Responsibilities
162
1633.1. Distribution of Source Form
164
165 All distribution of Covered Software in Source Code Form, including any
166 Modifications that You create or to which You contribute, must be under the
167 terms of this License. You must inform recipients that the Source Code Form
168 of the Covered Software is governed by the terms of this License, and how
169 they can obtain a copy of this License. You may not attempt to alter or
170 restrict the recipients’ rights in the Source Code Form.
171
1723.2. Distribution of Executable Form
173
174 If You distribute Covered Software in Executable Form then:
175
176 a. such Covered Software must also be made available in Source Code Form,
177 as described in Section 3.1, and You must inform recipients of the
178 Executable Form how they can obtain a copy of such Source Code Form by
179 reasonable means in a timely manner, at a charge no more than the cost
180 of distribution to the recipient; and
181
182 b. You may distribute such Executable Form under the terms of this License,
183 or sublicense it under different terms, provided that the license for
184 the Executable Form does not attempt to limit or alter the recipients’
185 rights in the Source Code Form under this License.
186
1873.3. Distribution of a Larger Work
188
189 You may create and distribute a Larger Work under terms of Your choice,
190 provided that You also comply with the requirements of this License for the
191 Covered Software. If the Larger Work is a combination of Covered Software
192 with a work governed by one or more Secondary Licenses, and the Covered
193 Software is not Incompatible With Secondary Licenses, this License permits
194 You to additionally distribute such Covered Software under the terms of
195 such Secondary License(s), so that the recipient of the Larger Work may, at
196 their option, further distribute the Covered Software under the terms of
197 either this License or such Secondary License(s).
198
1993.4. Notices
200
201 You may not remove or alter the substance of any license notices (including
202 copyright notices, patent notices, disclaimers of warranty, or limitations
203 of liability) contained within the Source Code Form of the Covered
204 Software, except that You may alter any license notices to the extent
205 required to remedy known factual inaccuracies.
206
2073.5. Application of Additional Terms
208
209 You may choose to offer, and to charge a fee for, warranty, support,
210 indemnity or liability obligations to one or more recipients of Covered
211 Software. However, You may do so only on Your own behalf, and not on behalf
212 of any Contributor. You must make it absolutely clear that any such
213 warranty, support, indemnity, or liability obligation is offered by You
214 alone, and You hereby agree to indemnify every Contributor for any
215 liability incurred by such Contributor as a result of warranty, support,
216 indemnity or liability terms You offer. You may include additional
217 disclaimers of warranty and limitations of liability specific to any
218 jurisdiction.
219
2204. Inability to Comply Due to Statute or Regulation
221
222 If it is impossible for You to comply with any of the terms of this License
223 with respect to some or all of the Covered Software due to statute, judicial
224 order, or regulation then You must: (a) comply with the terms of this License
225 to the maximum extent possible; and (b) describe the limitations and the code
226 they affect. Such description must be placed in a text file included with all
227 distributions of the Covered Software under this License. Except to the
228 extent prohibited by statute or regulation, such description must be
229 sufficiently detailed for a recipient of ordinary skill to be able to
230 understand it.
231
2325. Termination
233
2345.1. The rights granted under this License will terminate automatically if You
235 fail to comply with any of its terms. However, if You become compliant,
236 then the rights granted under this License from a particular Contributor
237 are reinstated (a) provisionally, unless and until such Contributor
238 explicitly and finally terminates Your grants, and (b) on an ongoing basis,
239 if such Contributor fails to notify You of the non-compliance by some
240 reasonable means prior to 60 days after You have come back into compliance.
241 Moreover, Your grants from a particular Contributor are reinstated on an
242 ongoing basis if such Contributor notifies You of the non-compliance by
243 some reasonable means, this is the first time You have received notice of
244 non-compliance with this License from such Contributor, and You become
245 compliant prior to 30 days after Your receipt of the notice.
246
2475.2. If You initiate litigation against any entity by asserting a patent
248 infringement claim (excluding declaratory judgment actions, counter-claims,
249 and cross-claims) alleging that a Contributor Version directly or
250 indirectly infringes any patent, then the rights granted to You by any and
251 all Contributors for the Covered Software under Section 2.1 of this License
252 shall terminate.
253
2545.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
255 license agreements (excluding distributors and resellers) which have been
256 validly granted by You or Your distributors under this License prior to
257 termination shall survive termination.
258
2596. Disclaimer of Warranty
260
261 Covered Software is provided under this License on an “as is” basis, without
262 warranty of any kind, either expressed, implied, or statutory, including,
263 without limitation, warranties that the Covered Software is free of defects,
264 merchantable, fit for a particular purpose or non-infringing. The entire
265 risk as to the quality and performance of the Covered Software is with You.
266 Should any Covered Software prove defective in any respect, You (not any
267 Contributor) assume the cost of any necessary servicing, repair, or
268 correction. This disclaimer of warranty constitutes an essential part of this
269 License. No use of any Covered Software is authorized under this License
270 except under this disclaimer.
271
2727. Limitation of Liability
273
274 Under no circumstances and under no legal theory, whether tort (including
275 negligence), contract, or otherwise, shall any Contributor, or anyone who
276 distributes Covered Software as permitted above, be liable to You for any
277 direct, indirect, special, incidental, or consequential damages of any
278 character including, without limitation, damages for lost profits, loss of
279 goodwill, work stoppage, computer failure or malfunction, or any and all
280 other commercial damages or losses, even if such party shall have been
281 informed of the possibility of such damages. This limitation of liability
282 shall not apply to liability for death or personal injury resulting from such
283 party’s negligence to the extent applicable law prohibits such limitation.
284 Some jurisdictions do not allow the exclusion or limitation of incidental or
285 consequential damages, so this exclusion and limitation may not apply to You.
286
2878. Litigation
288
289 Any litigation relating to this License may be brought only in the courts of
290 a jurisdiction where the defendant maintains its principal place of business
291 and such litigation shall be governed by laws of that jurisdiction, without
292 reference to its conflict-of-law provisions. Nothing in this Section shall
293 prevent a party’s ability to bring cross-claims or counter-claims.
294
2959. Miscellaneous
296
297 This License represents the complete agreement concerning the subject matter
298 hereof. If any provision of this License is held to be unenforceable, such
299 provision shall be reformed only to the extent necessary to make it
300 enforceable. Any law or regulation which provides that the language of a
301 contract shall be construed against the drafter shall not be used to construe
302 this License against a Contributor.
303
304
30510. Versions of the License
306
30710.1. New Versions
308
309 Mozilla Foundation is the license steward. Except as provided in Section
310 10.3, no one other than the license steward has the right to modify or
311 publish new versions of this License. Each version will be given a
312 distinguishing version number.
313
31410.2. Effect of New Versions
315
316 You may distribute the Covered Software under the terms of the version of
317 the License under which You originally received the Covered Software, or
318 under the terms of any subsequent version published by the license
319 steward.
320
32110.3. Modified Versions
322
323 If you create software not governed by this License, and you want to
324 create a new license for such software, you may create and use a modified
325 version of this License if you rename the license and remove any
326 references to the name of the license steward (except to note that such
327 modified license differs from this License).
328
32910.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
330 If You choose to distribute Source Code Form that is Incompatible With
331 Secondary Licenses under the terms of this version of the License, the
332 notice described in Exhibit B of this License must be attached.
333
334Exhibit A - Source Code Form License Notice
335
336 This Source Code Form is subject to the
337 terms of the Mozilla Public License, v.
338 2.0. If a copy of the MPL was not
339 distributed with this file, You can
340 obtain one at
341 http://mozilla.org/MPL/2.0/.
342
343If it is not possible or desirable to put the notice in a particular file, then
344You may include the notice in a location (such as a LICENSE file in a relevant
345directory) where a recipient would be likely to look for such a notice.
346
347You may add additional accurate notices of copyright ownership.
348
349Exhibit B - “Incompatible With Secondary Licenses” Notice
350
351 This Source Code Form is “Incompatible
352 With Secondary Licenses”, as defined by
353 the Mozilla Public License, v. 2.0.
354
diff --git a/vendor/github.com/hashicorp/go-version/README.md b/vendor/github.com/hashicorp/go-version/README.md
new file mode 100644
index 0000000..6f3a15c
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-version/README.md
@@ -0,0 +1,65 @@
1# Versioning Library for Go
2[![Build Status](https://travis-ci.org/hashicorp/go-version.svg?branch=master)](https://travis-ci.org/hashicorp/go-version)
3
4go-version is a library for parsing versions and version constraints,
5and verifying versions against a set of constraints. go-version
6can sort a collection of versions properly, handle prerelease/beta
7versions, increment versions, etc.
8
9Versions used with go-version must follow [SemVer](http://semver.org/).
10
11## Installation and Usage
12
13Package documentation can be found on
14[GoDoc](http://godoc.org/github.com/hashicorp/go-version).
15
16Installation can be done with a normal `go get`:
17
18```
19$ go get github.com/hashicorp/go-version
20```
21
22#### Version Parsing and Comparison
23
24```go
25v1, err := version.NewVersion("1.2")
26v2, err := version.NewVersion("1.5+metadata")
27
28// Comparison example. There is also GreaterThan, Equal, and just
29// a simple Compare that returns an int allowing easy >=, <=, etc.
30if v1.LessThan(v2) {
31 fmt.Printf("%s is less than %s", v1, v2)
32}
33```
34
35#### Version Constraints
36
37```go
38v1, err := version.NewVersion("1.2")
39
40// Constraints example.
41constraints, err := version.NewConstraint(">= 1.0, < 1.4")
42if constraints.Check(v1) {
43 fmt.Printf("%s satisfies constraints %s", v1, constraints)
44}
45```
46
47#### Version Sorting
48
49```go
50versionsRaw := []string{"1.1", "0.7.1", "1.4-beta", "1.4", "2"}
51versions := make([]*version.Version, len(versionsRaw))
52for i, raw := range versionsRaw {
53 v, _ := version.NewVersion(raw)
54 versions[i] = v
55}
56
57// After this, the versions are properly sorted
58sort.Sort(version.Collection(versions))
59```
60
61## Issues and Contributing
62
63If you find a problem with this library, please report an issue.
64Contributions are welcome: fork this library and submit a pull
65request.
diff --git a/vendor/github.com/hashicorp/go-version/constraint.go b/vendor/github.com/hashicorp/go-version/constraint.go
new file mode 100644
index 0000000..8c73df0
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-version/constraint.go
@@ -0,0 +1,178 @@
1package version
2
3import (
4 "fmt"
5 "regexp"
6 "strings"
7)
8
9// Constraint represents a single constraint for a version, such as
10// ">= 1.0".
11type Constraint struct {
12 f constraintFunc
13 check *Version
14 original string
15}
16
17// Constraints is a slice of constraints. We make a custom type so that
18// we can add methods to it.
19type Constraints []*Constraint
20
21type constraintFunc func(v, c *Version) bool
22
23var constraintOperators map[string]constraintFunc
24
25var constraintRegexp *regexp.Regexp
26
27func init() {
28 constraintOperators = map[string]constraintFunc{
29 "": constraintEqual,
30 "=": constraintEqual,
31 "!=": constraintNotEqual,
32 ">": constraintGreaterThan,
33 "<": constraintLessThan,
34 ">=": constraintGreaterThanEqual,
35 "<=": constraintLessThanEqual,
36 "~>": constraintPessimistic,
37 }
38
39 ops := make([]string, 0, len(constraintOperators))
40 for k := range constraintOperators {
41 ops = append(ops, regexp.QuoteMeta(k))
42 }
43
44 constraintRegexp = regexp.MustCompile(fmt.Sprintf(
45 `^\s*(%s)\s*(%s)\s*$`,
46 strings.Join(ops, "|"),
47 VersionRegexpRaw))
48}
49
50// NewConstraint will parse one or more constraints from the given
51// constraint string. The string must be a comma-separated list of
52// constraints.
53func NewConstraint(v string) (Constraints, error) {
54 vs := strings.Split(v, ",")
55 result := make([]*Constraint, len(vs))
56 for i, single := range vs {
57 c, err := parseSingle(single)
58 if err != nil {
59 return nil, err
60 }
61
62 result[i] = c
63 }
64
65 return Constraints(result), nil
66}
67
68// Check tests if a version satisfies all the constraints.
69func (cs Constraints) Check(v *Version) bool {
70 for _, c := range cs {
71 if !c.Check(v) {
72 return false
73 }
74 }
75
76 return true
77}
78
79// String returns the string format of the constraints.
80func (cs Constraints) String() string {
81 csStr := make([]string, len(cs))
82 for i, c := range cs {
83 csStr[i] = c.String()
84 }
85
86 return strings.Join(csStr, ",")
87}
88
89// Check tests if a constraint is validated by the given version.
90func (c *Constraint) Check(v *Version) bool {
91 return c.f(v, c.check)
92}
93
94func (c *Constraint) String() string {
95 return c.original
96}
97
98func parseSingle(v string) (*Constraint, error) {
99 matches := constraintRegexp.FindStringSubmatch(v)
100 if matches == nil {
101 return nil, fmt.Errorf("Malformed constraint: %s", v)
102 }
103
104 check, err := NewVersion(matches[2])
105 if err != nil {
106 return nil, err
107 }
108
109 return &Constraint{
110 f: constraintOperators[matches[1]],
111 check: check,
112 original: v,
113 }, nil
114}
115
116//-------------------------------------------------------------------
117// Constraint functions
118//-------------------------------------------------------------------
119
120func constraintEqual(v, c *Version) bool {
121 return v.Equal(c)
122}
123
124func constraintNotEqual(v, c *Version) bool {
125 return !v.Equal(c)
126}
127
128func constraintGreaterThan(v, c *Version) bool {
129 return v.Compare(c) == 1
130}
131
132func constraintLessThan(v, c *Version) bool {
133 return v.Compare(c) == -1
134}
135
136func constraintGreaterThanEqual(v, c *Version) bool {
137 return v.Compare(c) >= 0
138}
139
140func constraintLessThanEqual(v, c *Version) bool {
141 return v.Compare(c) <= 0
142}
143
144func constraintPessimistic(v, c *Version) bool {
145 // If the version being checked is naturally less than the constraint, then there
146 // is no way for the version to be valid against the constraint
147 if v.LessThan(c) {
148 return false
149 }
150 // We'll use this more than once, so grab the length now so it's a little cleaner
151 // to write the later checks
152 cs := len(c.segments)
153
154 // If the version being checked has less specificity than the constraint, then there
155 // is no way for the version to be valid against the constraint
156 if cs > len(v.segments) {
157 return false
158 }
159
160 // Check the segments in the constraint against those in the version. If the version
161 // being checked, at any point, does not have the same values in each index of the
162	// constraint's segments, then it cannot be valid against the constraint.
163 for i := 0; i < c.si-1; i++ {
164 if v.segments[i] != c.segments[i] {
165 return false
166 }
167 }
168
169 // Check the last part of the segment in the constraint. If the version segment at
170	// this index is less than the constraint's segment at this index, then it cannot
171 // be valid against the constraint
172 if c.segments[cs-1] > v.segments[cs-1] {
173 return false
174 }
175
176 // If nothing has rejected the version by now, it's valid
177 return true
178}
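
The pessimistic operator implemented above is easiest to see by example. This is a quick sketch, not part of the vendored file; it assumes only the exported `NewConstraint`/`NewVersion`/`Must`/`Check` API defined in this package. `~> 1.2` pins the first segment and requires at least 1.2, so any `1.x` with `x >= 2` passes but `2.0` does not.

```go
package main

import (
	"fmt"

	"github.com/hashicorp/go-version"
)

func main() {
	// Build the pessimistic constraint once.
	c, err := version.NewConstraint("~> 1.2")
	if err != nil {
		panic(err)
	}

	// Check a few candidate versions against it.
	for _, raw := range []string{"1.2.0", "1.9.3", "2.0.0", "1.1.0"} {
		v := version.Must(version.NewVersion(raw))
		fmt.Printf("%s: %v\n", raw, c.Check(v))
	}
	// Output: 1.2.0: true, 1.9.3: true, 2.0.0: false, 1.1.0: false
}
```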
diff --git a/vendor/github.com/hashicorp/go-version/version.go b/vendor/github.com/hashicorp/go-version/version.go
new file mode 100644
index 0000000..ae2f6b6
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-version/version.go
@@ -0,0 +1,308 @@
1package version
2
3import (
4 "bytes"
5 "fmt"
6 "reflect"
7 "regexp"
8 "strconv"
9 "strings"
10)
11
12// The compiled regular expression used to test the validity of a version.
13var versionRegexp *regexp.Regexp
14
15// The raw regular expression string used for testing the validity
16// of a version.
17const VersionRegexpRaw string = `v?([0-9]+(\.[0-9]+)*?)` +
18 `(-?([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` +
19 `(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` +
20 `?`
21
22// Version represents a single version.
23type Version struct {
24 metadata string
25 pre string
26 segments []int64
27 si int
28}
29
30func init() {
31 versionRegexp = regexp.MustCompile("^" + VersionRegexpRaw + "$")
32}
33
34// NewVersion parses the given version and returns a new
35// Version.
36func NewVersion(v string) (*Version, error) {
37 matches := versionRegexp.FindStringSubmatch(v)
38 if matches == nil {
39 return nil, fmt.Errorf("Malformed version: %s", v)
40 }
41 segmentsStr := strings.Split(matches[1], ".")
42 segments := make([]int64, len(segmentsStr))
43 si := 0
44 for i, str := range segmentsStr {
45 val, err := strconv.ParseInt(str, 10, 64)
46 if err != nil {
47 return nil, fmt.Errorf(
48 "Error parsing version: %s", err)
49 }
50
51		segments[i] = val
52 si++
53 }
54
55	// Even though we could support more than three segments, if we
56	// got fewer than three, pad with 0s. This covers the basic semver
57	// use case, which is MAJOR.MINOR.PATCH at the minimum.
58 for i := len(segments); i < 3; i++ {
59 segments = append(segments, 0)
60 }
61
62 return &Version{
63 metadata: matches[7],
64 pre: matches[4],
65 segments: segments,
66 si: si,
67 }, nil
68}
69
70// Must is a helper that wraps a call to a function returning (*Version, error)
71// and panics if error is non-nil.
72func Must(v *Version, err error) *Version {
73 if err != nil {
74 panic(err)
75 }
76
77 return v
78}
79
80// Compare compares this version to another version. This
81// returns -1, 0, or 1 if this version is smaller, equal,
82// or larger than the other version, respectively.
83//
84// If you want boolean results, use the LessThan, Equal,
85// or GreaterThan methods.
86func (v *Version) Compare(other *Version) int {
87 // A quick, efficient equality check
88 if v.String() == other.String() {
89 return 0
90 }
91
92 segmentsSelf := v.Segments64()
93 segmentsOther := other.Segments64()
94
95 // If the segments are the same, we must compare on prerelease info
96 if reflect.DeepEqual(segmentsSelf, segmentsOther) {
97 preSelf := v.Prerelease()
98 preOther := other.Prerelease()
99 if preSelf == "" && preOther == "" {
100 return 0
101 }
102 if preSelf == "" {
103 return 1
104 }
105 if preOther == "" {
106 return -1
107 }
108
109 return comparePrereleases(preSelf, preOther)
110 }
111
112	// Get the highest specificity (hS), or if they're equal, just use segmentsSelf's length
113 lenSelf := len(segmentsSelf)
114 lenOther := len(segmentsOther)
115 hS := lenSelf
116 if lenSelf < lenOther {
117 hS = lenOther
118 }
119 // Compare the segments
120 // Because a constraint could have more/less specificity than the version it's
121 // checking, we need to account for a lopsided or jagged comparison
122 for i := 0; i < hS; i++ {
123 if i > lenSelf-1 {
124 // This means Self had the lower specificity
125 // Check to see if the remaining segments in Other are all zeros
126 if !allZero(segmentsOther[i:]) {
127 // if not, it means that Other has to be greater than Self
128 return -1
129 }
130 break
131 } else if i > lenOther-1 {
132 // this means Other had the lower specificity
133			// Check to see if the remaining segments in Self are all zeros
134			if !allZero(segmentsSelf[i:]) {
135				// if not, it means that Self has to be greater than Other
136 return 1
137 }
138 break
139 }
140 lhs := segmentsSelf[i]
141 rhs := segmentsOther[i]
142 if lhs == rhs {
143 continue
144 } else if lhs < rhs {
145 return -1
146 }
147		// Otherwise lhs was greater than rhs; they're not equal
148 return 1
149 }
150
151 // if we got this far, they're equal
152 return 0
153}
154
155func allZero(segs []int64) bool {
156 for _, s := range segs {
157 if s != 0 {
158 return false
159 }
160 }
161 return true
162}
163
164func comparePart(preSelf string, preOther string) int {
165 if preSelf == preOther {
166 return 0
167 }
168
169 // if a part is empty, we use the other to decide
170 if preSelf == "" {
171		_, err := strconv.ParseInt(preOther, 10, 64)
172		if err == nil {
173 return -1
174 }
175 return 1
176 }
177
178 if preOther == "" {
179		_, err := strconv.ParseInt(preSelf, 10, 64)
180		if err == nil {
181 return 1
182 }
183 return -1
184 }
185
186 if preSelf > preOther {
187 return 1
188 }
189
190 return -1
191}
192
193func comparePrereleases(v string, other string) int {
194 // the same pre release!
195 if v == other {
196 return 0
197 }
198
199	// split both prereleases to analyze their parts
200 selfPreReleaseMeta := strings.Split(v, ".")
201 otherPreReleaseMeta := strings.Split(other, ".")
202
203 selfPreReleaseLen := len(selfPreReleaseMeta)
204 otherPreReleaseLen := len(otherPreReleaseMeta)
205
206 biggestLen := otherPreReleaseLen
207 if selfPreReleaseLen > otherPreReleaseLen {
208 biggestLen = selfPreReleaseLen
209 }
210
211	// loop over the parts to find the first difference
212 for i := 0; i < biggestLen; i = i + 1 {
213 partSelfPre := ""
214 if i < selfPreReleaseLen {
215 partSelfPre = selfPreReleaseMeta[i]
216 }
217
218 partOtherPre := ""
219 if i < otherPreReleaseLen {
220 partOtherPre = otherPreReleaseMeta[i]
221 }
222
223 compare := comparePart(partSelfPre, partOtherPre)
224		// if the parts are equal, continue the loop
225 if compare != 0 {
226 return compare
227 }
228 }
229
230 return 0
231}
232
233// Equal tests if two versions are equal.
234func (v *Version) Equal(o *Version) bool {
235 return v.Compare(o) == 0
236}
237
238// GreaterThan tests if this version is greater than another version.
239func (v *Version) GreaterThan(o *Version) bool {
240 return v.Compare(o) > 0
241}
242
243// LessThan tests if this version is less than another version.
244func (v *Version) LessThan(o *Version) bool {
245 return v.Compare(o) < 0
246}
247
248// Metadata returns any metadata that was part of the version
249// string.
250//
251// Metadata is anything that comes after the "+" in the version.
252// For example, with "1.2.3+beta", the metadata is "beta".
253func (v *Version) Metadata() string {
254 return v.metadata
255}
256
257// Prerelease returns any prerelease data that is part of the version,
258// or blank if there is no prerelease data.
259//
260// Prerelease information is anything that comes after the "-" in the
261// version (but before any metadata). For example, with "1.2.3-beta",
262// the prerelease information is "beta".
263func (v *Version) Prerelease() string {
264 return v.pre
265}
266
267// Segments returns the numeric segments of the version as a slice of ints.
268//
269// This excludes any metadata or pre-release information. For example,
270// for a version "1.2.3-beta", segments will return a slice of
271// 1, 2, 3.
272func (v *Version) Segments() []int {
273 segmentSlice := make([]int, len(v.segments))
274 for i, v := range v.segments {
275 segmentSlice[i] = int(v)
276 }
277 return segmentSlice
278}
279
280// Segments64 returns the numeric segments of the version as a slice of int64s.
281//
282// This excludes any metadata or pre-release information. For example,
283// for a version "1.2.3-beta", segments will return a slice of
284// 1, 2, 3.
285func (v *Version) Segments64() []int64 {
286 return v.segments
287}
288
289// String returns the full version string including pre-release
290// and metadata information.
291func (v *Version) String() string {
292 var buf bytes.Buffer
293 fmtParts := make([]string, len(v.segments))
294 for i, s := range v.segments {
295		// FormatInt cannot fail; the segment values were validated at parse time
296 str := strconv.FormatInt(s, 10)
297 fmtParts[i] = str
298 }
299	fmt.Fprint(&buf, strings.Join(fmtParts, "."))
300 if v.pre != "" {
301 fmt.Fprintf(&buf, "-%s", v.pre)
302 }
303 if v.metadata != "" {
304 fmt.Fprintf(&buf, "+%s", v.metadata)
305 }
306
307 return buf.String()
308}
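
As a quick illustration of the comparison rules implemented above (a sketch using only the exported methods of this file, with an illustrative `main` wrapper): a release compares greater than its own prerelease, and jagged segment counts are handled by zero-padding.

```go
package main

import (
	"fmt"

	"github.com/hashicorp/go-version"
)

func main() {
	release := version.Must(version.NewVersion("1.2"))
	beta := version.Must(version.NewVersion("1.2.0-beta"))

	// Equal segments, so the prerelease tag decides: releases sort higher.
	fmt.Println(release.Compare(beta)) // 1
	fmt.Println(beta.Prerelease())     // "beta"

	// "1.2" is padded to 1.2.0, so it equals "1.2.0".
	fmt.Println(release.Equal(version.Must(version.NewVersion("1.2.0")))) // true
}
```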
diff --git a/vendor/github.com/hashicorp/go-version/version_collection.go b/vendor/github.com/hashicorp/go-version/version_collection.go
new file mode 100644
index 0000000..cc888d4
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-version/version_collection.go
@@ -0,0 +1,17 @@
1package version
2
3// Collection is a type that implements the sort.Interface interface
4// so that versions can be sorted.
5type Collection []*Version
6
7func (v Collection) Len() int {
8 return len(v)
9}
10
11func (v Collection) Less(i, j int) bool {
12 return v[i].LessThan(v[j])
13}
14
15func (v Collection) Swap(i, j int) {
16 v[i], v[j] = v[j], v[i]
17}
diff --git a/vendor/github.com/hashicorp/hcl/LICENSE b/vendor/github.com/hashicorp/hcl/LICENSE
new file mode 100644
index 0000000..c33dcc7
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/LICENSE
@@ -0,0 +1,354 @@
1Mozilla Public License, version 2.0
2
31. Definitions
4
51.1. “Contributor”
6
7 means each individual or legal entity that creates, contributes to the
8 creation of, or owns Covered Software.
9
101.2. “Contributor Version”
11
12 means the combination of the Contributions of others (if any) used by a
13 Contributor and that particular Contributor’s Contribution.
14
151.3. “Contribution”
16
17 means Covered Software of a particular Contributor.
18
191.4. “Covered Software”
20
21 means Source Code Form to which the initial Contributor has attached the
22 notice in Exhibit A, the Executable Form of such Source Code Form, and
23 Modifications of such Source Code Form, in each case including portions
24 thereof.
25
261.5. “Incompatible With Secondary Licenses”
27 means
28
29 a. that the initial Contributor has attached the notice described in
30 Exhibit B to the Covered Software; or
31
32 b. that the Covered Software was made available under the terms of version
33 1.1 or earlier of the License, but not also under the terms of a
34 Secondary License.
35
361.6. “Executable Form”
37
38 means any form of the work other than Source Code Form.
39
401.7. “Larger Work”
41
42 means a work that combines Covered Software with other material, in a separate
43 file or files, that is not Covered Software.
44
451.8. “License”
46
47 means this document.
48
491.9. “Licensable”
50
51 means having the right to grant, to the maximum extent possible, whether at the
52 time of the initial grant or subsequently, any and all of the rights conveyed by
53 this License.
54
551.10. “Modifications”
56
57 means any of the following:
58
59 a. any file in Source Code Form that results from an addition to, deletion
60 from, or modification of the contents of Covered Software; or
61
62 b. any new file in Source Code Form that contains any Covered Software.
63
641.11. “Patent Claims” of a Contributor
65
66 means any patent claim(s), including without limitation, method, process,
67 and apparatus claims, in any patent Licensable by such Contributor that
68 would be infringed, but for the grant of the License, by the making,
69 using, selling, offering for sale, having made, import, or transfer of
70 either its Contributions or its Contributor Version.
71
721.12. “Secondary License”
73
74 means either the GNU General Public License, Version 2.0, the GNU Lesser
75 General Public License, Version 2.1, the GNU Affero General Public
76 License, Version 3.0, or any later versions of those licenses.
77
781.13. “Source Code Form”
79
80 means the form of the work preferred for making modifications.
81
821.14. “You” (or “Your”)
83
84 means an individual or a legal entity exercising rights under this
85 License. For legal entities, “You” includes any entity that controls, is
86 controlled by, or is under common control with You. For purposes of this
87 definition, “control” means (a) the power, direct or indirect, to cause
88 the direction or management of such entity, whether by contract or
89 otherwise, or (b) ownership of more than fifty percent (50%) of the
90 outstanding shares or beneficial ownership of such entity.
91
92
932. License Grants and Conditions
94
952.1. Grants
96
97 Each Contributor hereby grants You a world-wide, royalty-free,
98 non-exclusive license:
99
100 a. under intellectual property rights (other than patent or trademark)
101 Licensable by such Contributor to use, reproduce, make available,
102 modify, display, perform, distribute, and otherwise exploit its
103 Contributions, either on an unmodified basis, with Modifications, or as
104 part of a Larger Work; and
105
106 b. under Patent Claims of such Contributor to make, use, sell, offer for
107 sale, have made, import, and otherwise transfer either its Contributions
108 or its Contributor Version.
109
1102.2. Effective Date
111
112 The licenses granted in Section 2.1 with respect to any Contribution become
113 effective for each Contribution on the date the Contributor first distributes
114 such Contribution.
115
1162.3. Limitations on Grant Scope
117
118 The licenses granted in this Section 2 are the only rights granted under this
119 License. No additional rights or licenses will be implied from the distribution
120 or licensing of Covered Software under this License. Notwithstanding Section
121 2.1(b) above, no patent license is granted by a Contributor:
122
123 a. for any code that a Contributor has removed from Covered Software; or
124
125 b. for infringements caused by: (i) Your and any other third party’s
126 modifications of Covered Software, or (ii) the combination of its
127 Contributions with other software (except as part of its Contributor
128 Version); or
129
130 c. under Patent Claims infringed by Covered Software in the absence of its
131 Contributions.
132
133 This License does not grant any rights in the trademarks, service marks, or
134 logos of any Contributor (except as may be necessary to comply with the
135 notice requirements in Section 3.4).
136
1372.4. Subsequent Licenses
138
139 No Contributor makes additional grants as a result of Your choice to
140 distribute the Covered Software under a subsequent version of this License
141 (see Section 10.2) or under the terms of a Secondary License (if permitted
142 under the terms of Section 3.3).
143
1442.5. Representation
145
146 Each Contributor represents that the Contributor believes its Contributions
147 are its original creation(s) or it has sufficient rights to grant the
148 rights to its Contributions conveyed by this License.
149
1502.6. Fair Use
151
152 This License is not intended to limit any rights You have under applicable
153 copyright doctrines of fair use, fair dealing, or other equivalents.
154
1552.7. Conditions
156
157 Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
158 Section 2.1.
159
160
1613. Responsibilities
162
1633.1. Distribution of Source Form
164
165 All distribution of Covered Software in Source Code Form, including any
166 Modifications that You create or to which You contribute, must be under the
167 terms of this License. You must inform recipients that the Source Code Form
168 of the Covered Software is governed by the terms of this License, and how
169 they can obtain a copy of this License. You may not attempt to alter or
170 restrict the recipients’ rights in the Source Code Form.
171
1723.2. Distribution of Executable Form
173
174 If You distribute Covered Software in Executable Form then:
175
176 a. such Covered Software must also be made available in Source Code Form,
177 as described in Section 3.1, and You must inform recipients of the
178 Executable Form how they can obtain a copy of such Source Code Form by
179 reasonable means in a timely manner, at a charge no more than the cost
180 of distribution to the recipient; and
181
182 b. You may distribute such Executable Form under the terms of this License,
183 or sublicense it under different terms, provided that the license for
184 the Executable Form does not attempt to limit or alter the recipients’
185 rights in the Source Code Form under this License.
186
1873.3. Distribution of a Larger Work
188
189 You may create and distribute a Larger Work under terms of Your choice,
190 provided that You also comply with the requirements of this License for the
191 Covered Software. If the Larger Work is a combination of Covered Software
192 with a work governed by one or more Secondary Licenses, and the Covered
193 Software is not Incompatible With Secondary Licenses, this License permits
194 You to additionally distribute such Covered Software under the terms of
195 such Secondary License(s), so that the recipient of the Larger Work may, at
196 their option, further distribute the Covered Software under the terms of
197 either this License or such Secondary License(s).
198
1993.4. Notices
200
201 You may not remove or alter the substance of any license notices (including
202 copyright notices, patent notices, disclaimers of warranty, or limitations
203 of liability) contained within the Source Code Form of the Covered
204 Software, except that You may alter any license notices to the extent
205 required to remedy known factual inaccuracies.
206
2073.5. Application of Additional Terms
208
209 You may choose to offer, and to charge a fee for, warranty, support,
210 indemnity or liability obligations to one or more recipients of Covered
211 Software. However, You may do so only on Your own behalf, and not on behalf
212 of any Contributor. You must make it absolutely clear that any such
213 warranty, support, indemnity, or liability obligation is offered by You
214 alone, and You hereby agree to indemnify every Contributor for any
215 liability incurred by such Contributor as a result of warranty, support,
216 indemnity or liability terms You offer. You may include additional
217 disclaimers of warranty and limitations of liability specific to any
218 jurisdiction.
219
2204. Inability to Comply Due to Statute or Regulation
221
222 If it is impossible for You to comply with any of the terms of this License
223 with respect to some or all of the Covered Software due to statute, judicial
224 order, or regulation then You must: (a) comply with the terms of this License
225 to the maximum extent possible; and (b) describe the limitations and the code
226 they affect. Such description must be placed in a text file included with all
227 distributions of the Covered Software under this License. Except to the
228 extent prohibited by statute or regulation, such description must be
229 sufficiently detailed for a recipient of ordinary skill to be able to
230 understand it.
231
2325. Termination
233
2345.1. The rights granted under this License will terminate automatically if You
235 fail to comply with any of its terms. However, if You become compliant,
236 then the rights granted under this License from a particular Contributor
237 are reinstated (a) provisionally, unless and until such Contributor
238 explicitly and finally terminates Your grants, and (b) on an ongoing basis,
239 if such Contributor fails to notify You of the non-compliance by some
240 reasonable means prior to 60 days after You have come back into compliance.
241 Moreover, Your grants from a particular Contributor are reinstated on an
242 ongoing basis if such Contributor notifies You of the non-compliance by
243 some reasonable means, this is the first time You have received notice of
244 non-compliance with this License from such Contributor, and You become
245 compliant prior to 30 days after Your receipt of the notice.
246
2475.2. If You initiate litigation against any entity by asserting a patent
248 infringement claim (excluding declaratory judgment actions, counter-claims,
249 and cross-claims) alleging that a Contributor Version directly or
250 indirectly infringes any patent, then the rights granted to You by any and
251 all Contributors for the Covered Software under Section 2.1 of this License
252 shall terminate.
253
2545.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
255 license agreements (excluding distributors and resellers) which have been
256 validly granted by You or Your distributors under this License prior to
257 termination shall survive termination.
258
2596. Disclaimer of Warranty
260
261 Covered Software is provided under this License on an “as is” basis, without
262 warranty of any kind, either expressed, implied, or statutory, including,
263 without limitation, warranties that the Covered Software is free of defects,
264 merchantable, fit for a particular purpose or non-infringing. The entire
265 risk as to the quality and performance of the Covered Software is with You.
266 Should any Covered Software prove defective in any respect, You (not any
267 Contributor) assume the cost of any necessary servicing, repair, or
268 correction. This disclaimer of warranty constitutes an essential part of this
269 License. No use of any Covered Software is authorized under this License
270 except under this disclaimer.
271
2727. Limitation of Liability
273
274 Under no circumstances and under no legal theory, whether tort (including
275 negligence), contract, or otherwise, shall any Contributor, or anyone who
276 distributes Covered Software as permitted above, be liable to You for any
277 direct, indirect, special, incidental, or consequential damages of any
278 character including, without limitation, damages for lost profits, loss of
279 goodwill, work stoppage, computer failure or malfunction, or any and all
280 other commercial damages or losses, even if such party shall have been
281 informed of the possibility of such damages. This limitation of liability
282 shall not apply to liability for death or personal injury resulting from such
283 party’s negligence to the extent applicable law prohibits such limitation.
284 Some jurisdictions do not allow the exclusion or limitation of incidental or
285 consequential damages, so this exclusion and limitation may not apply to You.
286
2878. Litigation
288
289 Any litigation relating to this License may be brought only in the courts of
290 a jurisdiction where the defendant maintains its principal place of business
291 and such litigation shall be governed by laws of that jurisdiction, without
292 reference to its conflict-of-law provisions. Nothing in this Section shall
293 prevent a party’s ability to bring cross-claims or counter-claims.
294
2959. Miscellaneous
296
297 This License represents the complete agreement concerning the subject matter
298 hereof. If any provision of this License is held to be unenforceable, such
299 provision shall be reformed only to the extent necessary to make it
300 enforceable. Any law or regulation which provides that the language of a
301 contract shall be construed against the drafter shall not be used to construe
302 this License against a Contributor.
303
304
30510. Versions of the License
306
30710.1. New Versions
308
309 Mozilla Foundation is the license steward. Except as provided in Section
310 10.3, no one other than the license steward has the right to modify or
311 publish new versions of this License. Each version will be given a
312 distinguishing version number.
313
31410.2. Effect of New Versions
315
316 You may distribute the Covered Software under the terms of the version of
317 the License under which You originally received the Covered Software, or
318 under the terms of any subsequent version published by the license
319 steward.
320
32110.3. Modified Versions
322
323 If you create software not governed by this License, and you want to
324 create a new license for such software, you may create and use a modified
325 version of this License if you rename the license and remove any
326 references to the name of the license steward (except to note that such
327 modified license differs from this License).
328
32910.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
330 If You choose to distribute Source Code Form that is Incompatible With
331 Secondary Licenses under the terms of this version of the License, the
332 notice described in Exhibit B of this License must be attached.
333
334Exhibit A - Source Code Form License Notice
335
336 This Source Code Form is subject to the
337 terms of the Mozilla Public License, v.
338 2.0. If a copy of the MPL was not
339 distributed with this file, You can
340 obtain one at
341 http://mozilla.org/MPL/2.0/.
342
343If it is not possible or desirable to put the notice in a particular file, then
344You may include the notice in a location (such as a LICENSE file in a relevant
345directory) where a recipient would be likely to look for such a notice.
346
347You may add additional accurate notices of copyright ownership.
348
349Exhibit B - “Incompatible With Secondary Licenses” Notice
350
351 This Source Code Form is “Incompatible
352 With Secondary Licenses”, as defined by
353 the Mozilla Public License, v. 2.0.
354
diff --git a/vendor/github.com/hashicorp/hcl/Makefile b/vendor/github.com/hashicorp/hcl/Makefile
new file mode 100644
index 0000000..84fd743
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/Makefile
@@ -0,0 +1,18 @@
1TEST?=./...
2
3default: test
4
5fmt: generate
6 go fmt ./...
7
8test: generate
9 go get -t ./...
10 go test $(TEST) $(TESTARGS)
11
12generate:
13 go generate ./...
14
15updatedeps:
16 go get -u golang.org/x/tools/cmd/stringer
17
18.PHONY: default generate test updatedeps
diff --git a/vendor/github.com/hashicorp/hcl/README.md b/vendor/github.com/hashicorp/hcl/README.md
new file mode 100644
index 0000000..c822332
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/README.md
@@ -0,0 +1,125 @@
1# HCL
2
3[![GoDoc](https://godoc.org/github.com/hashicorp/hcl?status.png)](https://godoc.org/github.com/hashicorp/hcl) [![Build Status](https://travis-ci.org/hashicorp/hcl.svg?branch=master)](https://travis-ci.org/hashicorp/hcl)
4
5HCL (HashiCorp Configuration Language) is a configuration language built
6by HashiCorp. The goal of HCL is to build a structured configuration language
7that is both human and machine friendly for use with command-line tools,
8specifically targeting DevOps tools, servers, etc.
9
10HCL is also fully JSON compatible. That is, JSON can be used as completely
11valid input to a system expecting HCL. This helps make HCL-based systems
12interoperable with other systems.
13
14HCL is heavily inspired by
15[libucl](https://github.com/vstakhov/libucl),
16nginx configuration, and other similar languages.
17
18## Why?
19
20A common question when viewing HCL is to ask the question: why not
21JSON, YAML, etc.?
22
23Prior to HCL, the tools we built at [HashiCorp](http://www.hashicorp.com)
24used a variety of configuration languages from full programming languages
25such as Ruby to complete data structure languages such as JSON. What we
26learned is that some people wanted human-friendly configuration languages
27and some people wanted machine-friendly languages.
28
29JSON strikes a nice balance here, but is fairly verbose and, most
30importantly, doesn't support comments. With YAML, we found that beginners
31had a really hard time determining what the actual structure was, and
32ended up guessing more often than not whether to use a hyphen, colon, etc.
33in order to represent some configuration key.
34
35Full programming languages such as Ruby enable complex behavior
36a configuration language shouldn't usually allow, and also force
37people to learn at least some Ruby.
38
39Because of this, we decided to create our own configuration language
40that is JSON-compatible. Our configuration language (HCL) is designed
41to be written and modified by humans. The API for HCL allows JSON
42as an input so that it is also machine-friendly (machines can generate
43JSON instead of trying to generate HCL).
44
45Our goal with HCL is not to alienate other configuration languages.
46It is instead to provide HCL as a specialized language for our tools,
47and JSON as the interoperability layer.
48
49## Syntax
50
51For a complete grammar, please see the parser itself. A high-level overview
52of the syntax and grammar is listed here.
53
54 * Single line comments start with `#` or `//`
55
56 * Multi-line comments are wrapped in `/*` and `*/`. Nested block comments
57 are not allowed. A multi-line comment (also known as a block comment)
58 terminates at the first `*/` found.
59
60 * Values are assigned with the syntax `key = value` (whitespace doesn't
61 matter). The value can be any primitive: a string, number, boolean,
62 object, or list.
63
64 * Strings are double-quoted and can contain any UTF-8 characters.
65 Example: `"Hello, World"`
66
67 * Multi-line strings start with `<<EOF` at the end of a line, and end
68 with `EOF` on its own line ([here documents](https://en.wikipedia.org/wiki/Here_document)).
69 Any text may be used in place of `EOF`. Example:
70```
71<<FOO
72hello
73world
74FOO
75```
76
77 * Numbers are assumed to be base 10. If you prefix a number with 0x,
78   it is treated as hexadecimal. If it is prefixed with 0, it is
79   treated as octal. Numbers can be in scientific notation: "1e10".
80
81 * Boolean values: `true`, `false`
82
83 * Arrays can be made by wrapping values in `[]`. Example:
84 `["foo", "bar", 42]`. Arrays can contain primitives,
85 other arrays, and objects. As an alternative, lists
86 of objects can be created with repeated blocks, using
87 this structure:
88
89 ```hcl
90 service {
91 key = "value"
92 }
93
94 service {
95 key = "value"
96 }
97 ```
98
99Objects and nested objects are created using the structure shown below:
100
101```
102variable "ami" {
103 description = "the AMI to use"
104}
105```
106This would be equivalent to the following JSON:
107``` json
108{
109 "variable": {
110 "ami": {
111 "description": "the AMI to use"
112 }
113 }
114}
115```
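
As a rough sketch of how this maps onto the Go API vendored below (the type
and field names here are illustrative), the block above can be decoded with
`hcl.Decode` using `hcl` struct tags, including the `,key` modifier handled
in `decoder.go`:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl"
)

type Variable struct {
	Name        string `hcl:",key"`       // set from the block label, e.g. "ami"
	Description string `hcl:"description"`
}

type Config struct {
	Variables []Variable `hcl:"variable"`
}

func main() {
	input := `
variable "ami" {
  description = "the AMI to use"
}
`
	var config Config
	if err := hcl.Decode(&config, input); err != nil {
		panic(err)
	}

	// Prints: {Variables:[{Name:ami Description:the AMI to use}]}
	fmt.Printf("%+v\n", config)
}
```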
116
117## Thanks
118
119Thanks to:
120
121 * [@vstakhov](https://github.com/vstakhov) - The original libucl parser
122   and syntax that HCL was based on.
123
124 * [@fatih](https://github.com/fatih) - The rewritten HCL parser
125 in pure Go (no goyacc) and support for a printer.
diff --git a/vendor/github.com/hashicorp/hcl/appveyor.yml b/vendor/github.com/hashicorp/hcl/appveyor.yml
new file mode 100644
index 0000000..4db0b71
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/appveyor.yml
@@ -0,0 +1,19 @@
1version: "build-{branch}-{build}"
2image: Visual Studio 2015
3clone_folder: c:\gopath\src\github.com\hashicorp\hcl
4environment:
5 GOPATH: c:\gopath
6init:
7 - git config --global core.autocrlf false
8install:
9- cmd: >-
10 echo %Path%
11
12 go version
13
14 go env
15
16 go get -t ./...
17
18build_script:
19- cmd: go test -v ./...
diff --git a/vendor/github.com/hashicorp/hcl/decoder.go b/vendor/github.com/hashicorp/hcl/decoder.go
new file mode 100644
index 0000000..0b39c1b
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/decoder.go
@@ -0,0 +1,724 @@
1package hcl
2
3import (
4 "errors"
5 "fmt"
6 "reflect"
7 "sort"
8 "strconv"
9 "strings"
10
11 "github.com/hashicorp/hcl/hcl/ast"
12 "github.com/hashicorp/hcl/hcl/parser"
13 "github.com/hashicorp/hcl/hcl/token"
14)
15
16// tagName is the struct tag inspected to configure HCL decoding
17const tagName = "hcl"
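
// An illustrative (non-normative) sketch of the tag forms handled by
// decodeStruct below; the type and field names are hypothetical:
//
//	type Example struct {
//		Count   int      `hcl:"count"`          // decode from the key "count"
//		Skip    string   `hcl:"-"`              // never decoded
//		Base             `hcl:",squash"`        // embedded struct, fields squashed
//		Name    string   `hcl:",key"`           // set from the enclosing object key
//		Decoded []string `hcl:",decodedFields"` // receives the decoded field names
//	}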
18
19var (
20 // nodeType holds a reference to the type of ast.Node
21 nodeType reflect.Type = findNodeType()
22)
23
24// Unmarshal accepts a byte slice as input and writes the
25// data to the value pointed to by v.
26func Unmarshal(bs []byte, v interface{}) error {
27 root, err := parse(bs)
28 if err != nil {
29 return err
30 }
31
32 return DecodeObject(v, root)
33}
34
35// Decode reads the given input and decodes it into the structure
36// given by `out`.
37func Decode(out interface{}, in string) error {
38 obj, err := Parse(in)
39 if err != nil {
40 return err
41 }
42
43 return DecodeObject(out, obj)
44}
45
46// DecodeObject is a lower-level version of Decode. It decodes a
47// raw Object into the given output.
48func DecodeObject(out interface{}, n ast.Node) error {
49 val := reflect.ValueOf(out)
50 if val.Kind() != reflect.Ptr {
51 return errors.New("result must be a pointer")
52 }
53
54 // If we have the file, we really decode the root node
55 if f, ok := n.(*ast.File); ok {
56 n = f.Node
57 }
58
59 var d decoder
60 return d.decode("root", n, val.Elem())
61}
62
63type decoder struct {
64 stack []reflect.Kind
65}
66
67func (d *decoder) decode(name string, node ast.Node, result reflect.Value) error {
68 k := result
69
70 // If we have an interface with a valid value, we use that
71 // for the check.
72 if result.Kind() == reflect.Interface {
73 elem := result.Elem()
74 if elem.IsValid() {
75 k = elem
76 }
77 }
78
79 // Push current onto stack unless it is an interface.
80 if k.Kind() != reflect.Interface {
81 d.stack = append(d.stack, k.Kind())
82
83 // Schedule a pop
84 defer func() {
85 d.stack = d.stack[:len(d.stack)-1]
86 }()
87 }
88
89 switch k.Kind() {
90 case reflect.Bool:
91 return d.decodeBool(name, node, result)
92 case reflect.Float64:
93 return d.decodeFloat(name, node, result)
94 case reflect.Int, reflect.Int32, reflect.Int64:
95 return d.decodeInt(name, node, result)
96 case reflect.Interface:
97		// When we see an interface, we choose the concrete type ourselves
98 return d.decodeInterface(name, node, result)
99 case reflect.Map:
100 return d.decodeMap(name, node, result)
101 case reflect.Ptr:
102 return d.decodePtr(name, node, result)
103 case reflect.Slice:
104 return d.decodeSlice(name, node, result)
105 case reflect.String:
106 return d.decodeString(name, node, result)
107 case reflect.Struct:
108 return d.decodeStruct(name, node, result)
109 default:
110 return &parser.PosError{
111 Pos: node.Pos(),
112 Err: fmt.Errorf("%s: unknown kind to decode into: %s", name, k.Kind()),
113 }
114 }
115}
116
117func (d *decoder) decodeBool(name string, node ast.Node, result reflect.Value) error {
118 switch n := node.(type) {
119 case *ast.LiteralType:
120 if n.Token.Type == token.BOOL {
121 v, err := strconv.ParseBool(n.Token.Text)
122 if err != nil {
123 return err
124 }
125
126 result.Set(reflect.ValueOf(v))
127 return nil
128 }
129 }
130
131 return &parser.PosError{
132 Pos: node.Pos(),
133 Err: fmt.Errorf("%s: unknown type %T", name, node),
134 }
135}
136
137func (d *decoder) decodeFloat(name string, node ast.Node, result reflect.Value) error {
138 switch n := node.(type) {
139 case *ast.LiteralType:
140 if n.Token.Type == token.FLOAT {
141 v, err := strconv.ParseFloat(n.Token.Text, 64)
142 if err != nil {
143 return err
144 }
145
146 result.Set(reflect.ValueOf(v))
147 return nil
148 }
149 }
150
151 return &parser.PosError{
152 Pos: node.Pos(),
153 Err: fmt.Errorf("%s: unknown type %T", name, node),
154 }
155}
156
157func (d *decoder) decodeInt(name string, node ast.Node, result reflect.Value) error {
158 switch n := node.(type) {
159 case *ast.LiteralType:
160 switch n.Token.Type {
161 case token.NUMBER:
162 v, err := strconv.ParseInt(n.Token.Text, 0, 0)
163 if err != nil {
164 return err
165 }
166
167 if result.Kind() == reflect.Interface {
168 result.Set(reflect.ValueOf(int(v)))
169 } else {
170 result.SetInt(v)
171 }
172 return nil
173 case token.STRING:
174 v, err := strconv.ParseInt(n.Token.Value().(string), 0, 0)
175 if err != nil {
176 return err
177 }
178
179 if result.Kind() == reflect.Interface {
180 result.Set(reflect.ValueOf(int(v)))
181 } else {
182 result.SetInt(v)
183 }
184 return nil
185 }
186 }
187
188 return &parser.PosError{
189 Pos: node.Pos(),
190 Err: fmt.Errorf("%s: unknown type %T", name, node),
191 }
192}
193
194func (d *decoder) decodeInterface(name string, node ast.Node, result reflect.Value) error {
195 // When we see an ast.Node, we retain the value to enable deferred decoding.
196 // Very useful in situations where we want to preserve ast.Node information
197 // like Pos
198 if result.Type() == nodeType && result.CanSet() {
199 result.Set(reflect.ValueOf(node))
200 return nil
201 }
202
203 var set reflect.Value
204 redecode := true
205
206 // For testing types, ObjectType should just be treated as a list. We
207 // set this to a temporary var because we want to pass in the real node.
208 testNode := node
209 if ot, ok := node.(*ast.ObjectType); ok {
210 testNode = ot.List
211 }
212
213 switch n := testNode.(type) {
214 case *ast.ObjectList:
215 // If we're at the root or we're directly within a slice, then we
216 // decode objects into map[string]interface{}, otherwise we decode
217 // them into lists.
218 if len(d.stack) == 0 || d.stack[len(d.stack)-1] == reflect.Slice {
219 var temp map[string]interface{}
220 tempVal := reflect.ValueOf(temp)
221 result := reflect.MakeMap(
222 reflect.MapOf(
223 reflect.TypeOf(""),
224 tempVal.Type().Elem()))
225
226 set = result
227 } else {
228 var temp []map[string]interface{}
229 tempVal := reflect.ValueOf(temp)
230 result := reflect.MakeSlice(
231 reflect.SliceOf(tempVal.Type().Elem()), 0, len(n.Items))
232 set = result
233 }
234 case *ast.ObjectType:
235 // If we're at the root or we're directly within a slice, then we
236 // decode objects into map[string]interface{}, otherwise we decode
237 // them into lists.
238 if len(d.stack) == 0 || d.stack[len(d.stack)-1] == reflect.Slice {
239 var temp map[string]interface{}
240 tempVal := reflect.ValueOf(temp)
241 result := reflect.MakeMap(
242 reflect.MapOf(
243 reflect.TypeOf(""),
244 tempVal.Type().Elem()))
245
246 set = result
247 } else {
248 var temp []map[string]interface{}
249 tempVal := reflect.ValueOf(temp)
250 result := reflect.MakeSlice(
251 reflect.SliceOf(tempVal.Type().Elem()), 0, 1)
252 set = result
253 }
254 case *ast.ListType:
255 var temp []interface{}
256 tempVal := reflect.ValueOf(temp)
257 result := reflect.MakeSlice(
258 reflect.SliceOf(tempVal.Type().Elem()), 0, 0)
259 set = result
260 case *ast.LiteralType:
261 switch n.Token.Type {
262 case token.BOOL:
263 var result bool
264 set = reflect.Indirect(reflect.New(reflect.TypeOf(result)))
265 case token.FLOAT:
266 var result float64
267 set = reflect.Indirect(reflect.New(reflect.TypeOf(result)))
268 case token.NUMBER:
269 var result int
270 set = reflect.Indirect(reflect.New(reflect.TypeOf(result)))
271 case token.STRING, token.HEREDOC:
272 set = reflect.Indirect(reflect.New(reflect.TypeOf("")))
273 default:
274 return &parser.PosError{
275 Pos: node.Pos(),
276 Err: fmt.Errorf("%s: cannot decode into interface: %T", name, node),
277 }
278 }
279 default:
280 return fmt.Errorf(
281 "%s: cannot decode into interface: %T",
282 name, node)
283 }
284
285	// Set the result to what it's supposed to be, then reset
286 // result so we don't reflect into this method anymore.
287 result.Set(set)
288
289 if redecode {
290 // Revisit the node so that we can use the newly instantiated
291 // thing and populate it.
292 if err := d.decode(name, node, result); err != nil {
293 return err
294 }
295 }
296
297 return nil
298}
299
300func (d *decoder) decodeMap(name string, node ast.Node, result reflect.Value) error {
301 if item, ok := node.(*ast.ObjectItem); ok {
302 node = &ast.ObjectList{Items: []*ast.ObjectItem{item}}
303 }
304
305 if ot, ok := node.(*ast.ObjectType); ok {
306 node = ot.List
307 }
308
309 n, ok := node.(*ast.ObjectList)
310 if !ok {
311 return &parser.PosError{
312 Pos: node.Pos(),
313 Err: fmt.Errorf("%s: not an object type for map (%T)", name, node),
314 }
315 }
316
317 // If we have an interface, then we can address the interface,
318	// but not the map itself, so get the element but set the interface
319 set := result
320 if result.Kind() == reflect.Interface {
321 result = result.Elem()
322 }
323
324 resultType := result.Type()
325 resultElemType := resultType.Elem()
326 resultKeyType := resultType.Key()
327 if resultKeyType.Kind() != reflect.String {
328 return &parser.PosError{
329 Pos: node.Pos(),
330 Err: fmt.Errorf("%s: map must have string keys", name),
331 }
332 }
333
334 // Make a map if it is nil
335 resultMap := result
336 if result.IsNil() {
337 resultMap = reflect.MakeMap(
338 reflect.MapOf(resultKeyType, resultElemType))
339 }
340
341 // Go through each element and decode it.
342 done := make(map[string]struct{})
343 for _, item := range n.Items {
344 if item.Val == nil {
345 continue
346 }
347
348 // github.com/hashicorp/terraform/issue/5740
349 if len(item.Keys) == 0 {
350 return &parser.PosError{
351 Pos: node.Pos(),
352 Err: fmt.Errorf("%s: map must have string keys", name),
353 }
354 }
355
356 // Get the key we're dealing with, which is the first item
357 keyStr := item.Keys[0].Token.Value().(string)
358
359 // If we've already processed this key, then ignore it
360 if _, ok := done[keyStr]; ok {
361 continue
362 }
363
364 // Determine the value. If we have more than one key, then we
365 // get the objectlist of only these keys.
366 itemVal := item.Val
367 if len(item.Keys) > 1 {
368 itemVal = n.Filter(keyStr)
369 done[keyStr] = struct{}{}
370 }
371
372 // Make the field name
373 fieldName := fmt.Sprintf("%s.%s", name, keyStr)
374
375 // Get the key/value as reflection values
376 key := reflect.ValueOf(keyStr)
377 val := reflect.Indirect(reflect.New(resultElemType))
378
379 // If we have a pre-existing value in the map, use that
380 oldVal := resultMap.MapIndex(key)
381 if oldVal.IsValid() {
382 val.Set(oldVal)
383 }
384
385 // Decode!
386 if err := d.decode(fieldName, itemVal, val); err != nil {
387 return err
388 }
389
390 // Set the value on the map
391 resultMap.SetMapIndex(key, val)
392 }
393
394 // Set the final map if we can
395 set.Set(resultMap)
396 return nil
397}
398
399func (d *decoder) decodePtr(name string, node ast.Node, result reflect.Value) error {
400 // Create an element of the concrete (non pointer) type and decode
401 // into that. Then set the value of the pointer to this type.
402 resultType := result.Type()
403 resultElemType := resultType.Elem()
404 val := reflect.New(resultElemType)
405 if err := d.decode(name, node, reflect.Indirect(val)); err != nil {
406 return err
407 }
408
409 result.Set(val)
410 return nil
411}
412
413func (d *decoder) decodeSlice(name string, node ast.Node, result reflect.Value) error {
414 // If we have an interface, then we can address the interface,
415 // but not the slice itself, so get the element but set the interface
416 set := result
417 if result.Kind() == reflect.Interface {
418 result = result.Elem()
419 }
420	// Create the slice if it is nil
421 resultType := result.Type()
422 resultElemType := resultType.Elem()
423 if result.IsNil() {
424 resultSliceType := reflect.SliceOf(resultElemType)
425 result = reflect.MakeSlice(
426 resultSliceType, 0, 0)
427 }
428
429 // Figure out the items we'll be copying into the slice
430 var items []ast.Node
431 switch n := node.(type) {
432 case *ast.ObjectList:
433 items = make([]ast.Node, len(n.Items))
434 for i, item := range n.Items {
435 items[i] = item
436 }
437 case *ast.ObjectType:
438 items = []ast.Node{n}
439 case *ast.ListType:
440 items = n.List
441 default:
442 return &parser.PosError{
443 Pos: node.Pos(),
444 Err: fmt.Errorf("unknown slice type: %T", node),
445 }
446 }
447
448 for i, item := range items {
449 fieldName := fmt.Sprintf("%s[%d]", name, i)
450
451 // Decode
452 val := reflect.Indirect(reflect.New(resultElemType))
453
454 // if item is an object that was decoded from ambiguous JSON and
455 // flattened, make sure it's expanded if it needs to decode into a
456 // defined structure.
457 item := expandObject(item, val)
458
459 if err := d.decode(fieldName, item, val); err != nil {
460 return err
461 }
462
463 // Append it onto the slice
464 result = reflect.Append(result, val)
465 }
466
467 set.Set(result)
468 return nil
469}
470
471// expandObject detects if an ambiguous JSON object was flattened to a List which
472// should be decoded into a struct, and expands the AST so it decodes properly.
473func expandObject(node ast.Node, result reflect.Value) ast.Node {
474 item, ok := node.(*ast.ObjectItem)
475 if !ok {
476 return node
477 }
478
479 elemType := result.Type()
480
481 // our target type must be a struct
482 switch elemType.Kind() {
483 case reflect.Ptr:
484 switch elemType.Elem().Kind() {
485 case reflect.Struct:
486 //OK
487 default:
488 return node
489 }
490 case reflect.Struct:
491 //OK
492 default:
493 return node
494 }
495
496 // A list value will have a key and field name. If it had more fields,
497 // it wouldn't have been flattened.
498 if len(item.Keys) != 2 {
499 return node
500 }
501
502 keyToken := item.Keys[0].Token
503 item.Keys = item.Keys[1:]
504
505 // we need to un-flatten the ast enough to decode
506 newNode := &ast.ObjectItem{
507 Keys: []*ast.ObjectKey{
508 &ast.ObjectKey{
509 Token: keyToken,
510 },
511 },
512 Val: &ast.ObjectType{
513 List: &ast.ObjectList{
514 Items: []*ast.ObjectItem{item},
515 },
516 },
517 }
518
519 return newNode
520}
521
522func (d *decoder) decodeString(name string, node ast.Node, result reflect.Value) error {
523 switch n := node.(type) {
524 case *ast.LiteralType:
525 switch n.Token.Type {
526 case token.NUMBER:
527 result.Set(reflect.ValueOf(n.Token.Text).Convert(result.Type()))
528 return nil
529 case token.STRING, token.HEREDOC:
530 result.Set(reflect.ValueOf(n.Token.Value()).Convert(result.Type()))
531 return nil
532 }
533 }
534
535 return &parser.PosError{
536 Pos: node.Pos(),
537 Err: fmt.Errorf("%s: unknown type for string %T", name, node),
538 }
539}
540
541func (d *decoder) decodeStruct(name string, node ast.Node, result reflect.Value) error {
542 var item *ast.ObjectItem
543 if it, ok := node.(*ast.ObjectItem); ok {
544 item = it
545 node = it.Val
546 }
547
548 if ot, ok := node.(*ast.ObjectType); ok {
549 node = ot.List
550 }
551
552 // Handle the special case where the object itself is a literal. Previously
553 // the yacc parser would always ensure top-level elements were arrays. The new
554 // parser does not make the same guarantees, thus we need to convert any
555 // top-level literal elements into a list.
556 if _, ok := node.(*ast.LiteralType); ok && item != nil {
557 node = &ast.ObjectList{Items: []*ast.ObjectItem{item}}
558 }
559
560 list, ok := node.(*ast.ObjectList)
561 if !ok {
562 return &parser.PosError{
563 Pos: node.Pos(),
564 Err: fmt.Errorf("%s: not an object type for struct (%T)", name, node),
565 }
566 }
567
568 // This slice will keep track of all the structs we'll be decoding.
569 // There can be more than one struct if there are embedded structs
570 // that are squashed.
571 structs := make([]reflect.Value, 1, 5)
572 structs[0] = result
573
574 // Compile the list of all the fields that we're going to be decoding
575 // from all the structs.
576 fields := make(map[*reflect.StructField]reflect.Value)
577 for len(structs) > 0 {
578 structVal := structs[0]
579 structs = structs[1:]
580
581 structType := structVal.Type()
582 for i := 0; i < structType.NumField(); i++ {
583 fieldType := structType.Field(i)
584 tagParts := strings.Split(fieldType.Tag.Get(tagName), ",")
585
586 // Ignore fields with tag name "-"
587 if tagParts[0] == "-" {
588 continue
589 }
590
591 if fieldType.Anonymous {
592 fieldKind := fieldType.Type.Kind()
593 if fieldKind != reflect.Struct {
594 return &parser.PosError{
595 Pos: node.Pos(),
596 Err: fmt.Errorf("%s: unsupported type to struct: %s",
597 fieldType.Name, fieldKind),
598 }
599 }
600
601 // We have an embedded field. We "squash" the fields down
602 // if specified in the tag.
603 squash := false
604 for _, tag := range tagParts[1:] {
605 if tag == "squash" {
606 squash = true
607 break
608 }
609 }
610
611 if squash {
612 structs = append(
613 structs, result.FieldByName(fieldType.Name))
614 continue
615 }
616 }
617
618 // Normal struct field, store it away
619 fields[&fieldType] = structVal.Field(i)
620 }
621 }
622
623 usedKeys := make(map[string]struct{})
624 decodedFields := make([]string, 0, len(fields))
625 decodedFieldsVal := make([]reflect.Value, 0)
626 unusedKeysVal := make([]reflect.Value, 0)
627 for fieldType, field := range fields {
628 if !field.IsValid() {
629 // This should never happen
630 panic("field is not valid")
631 }
632
633 // If we can't set the field, then it is unexported or something,
634 // and we just continue onwards.
635 if !field.CanSet() {
636 continue
637 }
638
639 fieldName := fieldType.Name
640
641 tagValue := fieldType.Tag.Get(tagName)
642 tagParts := strings.SplitN(tagValue, ",", 2)
643 if len(tagParts) >= 2 {
644 switch tagParts[1] {
645 case "decodedFields":
646 decodedFieldsVal = append(decodedFieldsVal, field)
647 continue
648 case "key":
649 if item == nil {
650 return &parser.PosError{
651 Pos: node.Pos(),
652 Err: fmt.Errorf("%s: %s asked for 'key', impossible",
653 name, fieldName),
654 }
655 }
656
657 field.SetString(item.Keys[0].Token.Value().(string))
658 continue
659 case "unusedKeys":
660 unusedKeysVal = append(unusedKeysVal, field)
661 continue
662 }
663 }
664
665 if tagParts[0] != "" {
666 fieldName = tagParts[0]
667 }
668
669 // Determine the element we'll use to decode. If it is a single
670 // match (only object with the field), then we decode it exactly.
671 // If it is a prefix match, then we decode the matches.
672 filter := list.Filter(fieldName)
673
674 prefixMatches := filter.Children()
675 matches := filter.Elem()
676 if len(matches.Items) == 0 && len(prefixMatches.Items) == 0 {
677 continue
678 }
679
680 // Track the used key
681 usedKeys[fieldName] = struct{}{}
682
683 // Create the field name and decode. We range over the elements
684 // because we actually want the value.
685 fieldName = fmt.Sprintf("%s.%s", name, fieldName)
686 if len(prefixMatches.Items) > 0 {
687 if err := d.decode(fieldName, prefixMatches, field); err != nil {
688 return err
689 }
690 }
691 for _, match := range matches.Items {
692 var decodeNode ast.Node = match.Val
693 if ot, ok := decodeNode.(*ast.ObjectType); ok {
694 decodeNode = &ast.ObjectList{Items: ot.List.Items}
695 }
696
697 if err := d.decode(fieldName, decodeNode, field); err != nil {
698 return err
699 }
700 }
701
702 decodedFields = append(decodedFields, fieldType.Name)
703 }
704
705 if len(decodedFieldsVal) > 0 {
706 // Sort it so that it is deterministic
707 sort.Strings(decodedFields)
708
709 for _, v := range decodedFieldsVal {
710 v.Set(reflect.ValueOf(decodedFields))
711 }
712 }
713
714 return nil
715}
716
717// findNodeType returns the type of ast.Node
718func findNodeType() reflect.Type {
719 var nodeContainer struct {
720 Node ast.Node
721 }
722 value := reflect.ValueOf(nodeContainer).FieldByName("Node")
723 return value.Type()
724}
diff --git a/vendor/github.com/hashicorp/hcl/hcl.go b/vendor/github.com/hashicorp/hcl/hcl.go
new file mode 100644
index 0000000..575a20b
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl.go
@@ -0,0 +1,11 @@
1// Package hcl decodes HCL into usable Go structures.
2//
3// hcl input can come in either pure HCL format or JSON format.
4// It can be parsed into an AST, and then decoded into a structure,
5// or it can be decoded directly from a string into a structure.
6//
7// If you choose to parse HCL into a raw AST, the benefit is that you
8// can write custom visitor implementations to perform custom
9// semantic checks. By default, HCL does not perform any semantic
10// checks.
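//
// A minimal decoding sketch (the Config type here is illustrative):
//
//	type Config struct {
//		Region string `hcl:"region"`
//	}
//
//	var c Config
//	err := hcl.Decode(&c, `region = "us-east-1"`)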
11package hcl
diff --git a/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go b/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go
new file mode 100644
index 0000000..6e5ef65
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go
@@ -0,0 +1,219 @@
1// Package ast declares the types used to represent syntax trees for HCL
2// (HashiCorp Configuration Language)
3package ast
4
5import (
6 "fmt"
7 "strings"
8
9 "github.com/hashicorp/hcl/hcl/token"
10)
11
12// Node is an element in the abstract syntax tree.
13type Node interface {
14 node()
15 Pos() token.Pos
16}
17
18func (File) node() {}
19func (ObjectList) node() {}
20func (ObjectKey) node() {}
21func (ObjectItem) node() {}
22func (Comment) node() {}
23func (CommentGroup) node() {}
24func (ObjectType) node() {}
25func (LiteralType) node() {}
26func (ListType) node() {}
27
28// File represents a single HCL file
29type File struct {
30 Node Node // usually a *ObjectList
31 Comments []*CommentGroup // list of all comments in the source
32}
33
34func (f *File) Pos() token.Pos {
35 return f.Node.Pos()
36}
37
38// ObjectList represents a list of ObjectItems. An HCL file itself is an
39// ObjectList.
40type ObjectList struct {
41 Items []*ObjectItem
42}
43
44func (o *ObjectList) Add(item *ObjectItem) {
45 o.Items = append(o.Items, item)
46}
47
48// Filter filters out the objects with the given key list as a prefix.
49//
50// The returned list of objects contains ObjectItems whose keys have
51// this prefix already stripped off. This might result in objects with
52// zero-length key lists if they have no children.
53//
54// If no matches are found, an empty ObjectList (non-nil) is returned.
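//
// For example (illustrative), items keyed ["service", "web"] and
// ["service", "db"] filtered by Filter("service") are returned with
// keys ["web"] and ["db"] respectively.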
55func (o *ObjectList) Filter(keys ...string) *ObjectList {
56 var result ObjectList
57 for _, item := range o.Items {
58 // If there aren't enough keys, then ignore this
59 if len(item.Keys) < len(keys) {
60 continue
61 }
62
63 match := true
64 for i, key := range item.Keys[:len(keys)] {
65 key := key.Token.Value().(string)
66 if key != keys[i] && !strings.EqualFold(key, keys[i]) {
67 match = false
68 break
69 }
70 }
71 if !match {
72 continue
73 }
74
75 // Strip off the prefix from the children
76 newItem := *item
77 newItem.Keys = newItem.Keys[len(keys):]
78 result.Add(&newItem)
79 }
80
81 return &result
82}
83
84// Children returns further nested objects (key length > 0) within this
85// ObjectList. This should be used with Filter to get at child items.
86func (o *ObjectList) Children() *ObjectList {
87 var result ObjectList
88 for _, item := range o.Items {
89 if len(item.Keys) > 0 {
90 result.Add(item)
91 }
92 }
93
94 return &result
95}
96
97// Elem returns items in the list that are direct element assignments
98// (key length == 0). This should be used with Filter to get at elements.
99func (o *ObjectList) Elem() *ObjectList {
100 var result ObjectList
101 for _, item := range o.Items {
102 if len(item.Keys) == 0 {
103 result.Add(item)
104 }
105 }
106
107 return &result
108}
109
110func (o *ObjectList) Pos() token.Pos {
111	// returns the position of the first item
112 return o.Items[0].Pos()
113}
114
115// ObjectItem represents an HCL Object Item. An item is represented with a key
116// (or keys). It can be an assignment or an object (both normal and nested)
117type ObjectItem struct {
118	// Keys is only one element long if the item is an assignment. For a
119	// nested object it can be longer than one. In that case "Assign" is
120	// invalid, as there are no assignments for a nested object.
121 Keys []*ObjectKey
122
123 // assign contains the position of "=", if any
124 Assign token.Pos
125
126	// Val is the item itself. It can be an object, list, number, bool or a
127	// string. If the key length is larger than one, Val can only be of type
128 // Object.
129 Val Node
130
131 LeadComment *CommentGroup // associated lead comment
132 LineComment *CommentGroup // associated line comment
133}
134
135func (o *ObjectItem) Pos() token.Pos {
136 // I'm not entirely sure what causes this, but removing this causes
137 // a test failure. We should investigate at some point.
138 if len(o.Keys) == 0 {
139 return token.Pos{}
140 }
141
142 return o.Keys[0].Pos()
143}
144
145// ObjectKey is either an identifier or a string.
146type ObjectKey struct {
147 Token token.Token
148}
149
150func (o *ObjectKey) Pos() token.Pos {
151 return o.Token.Pos
152}
153
154// LiteralType represents a literal of basic type. Valid types are:
155// token.NUMBER, token.FLOAT, token.BOOL and token.STRING
156type LiteralType struct {
157 Token token.Token
158
159 // comment types, only used when in a list
160 LeadComment *CommentGroup
161 LineComment *CommentGroup
162}
163
164func (l *LiteralType) Pos() token.Pos {
165 return l.Token.Pos
166}
167
168// ListType represents an HCL List type
169type ListType struct {
170 Lbrack token.Pos // position of "["
171 Rbrack token.Pos // position of "]"
172 List []Node // the elements in lexical order
173}
174
175func (l *ListType) Pos() token.Pos {
176 return l.Lbrack
177}
178
179func (l *ListType) Add(node Node) {
180 l.List = append(l.List, node)
181}
182
183// ObjectType represents an HCL Object Type
184type ObjectType struct {
185 Lbrace token.Pos // position of "{"
186 Rbrace token.Pos // position of "}"
187 List *ObjectList // the nodes in lexical order
188}
189
190func (o *ObjectType) Pos() token.Pos {
191 return o.Lbrace
192}
193
194// Comment node represents a single //, # style or /* */ style comment
195type Comment struct {
196 Start token.Pos // position of / or #
197 Text string
198}
199
200func (c *Comment) Pos() token.Pos {
201 return c.Start
202}
203
204// CommentGroup node represents a sequence of comments with no other tokens and
205// no empty lines between.
206type CommentGroup struct {
207 List []*Comment // len(List) > 0
208}
209
210func (c *CommentGroup) Pos() token.Pos {
211 return c.List[0].Pos()
212}
213
214//-------------------------------------------------------------------
215// GoStringer
216//-------------------------------------------------------------------
217
218func (o *ObjectKey) GoString() string { return fmt.Sprintf("*%#v", *o) }
219func (o *ObjectList) GoString() string { return fmt.Sprintf("*%#v", *o) }
diff --git a/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go b/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go
new file mode 100644
index 0000000..ba07ad4
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go
@@ -0,0 +1,52 @@
1package ast
2
3import "fmt"
4
5// WalkFunc describes a function to be called for each node during a Walk. The
6// returned node can be used to rewrite the AST. Walking stops if the
7// returned bool is false.
8type WalkFunc func(Node) (Node, bool)
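
// For example (illustrative), a pass that visits every node unchanged:
//
//	Walk(file.Node, func(n Node) (Node, bool) {
//		// n is nil once a subtree has been fully visited
//		return n, true
//	})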
9
10// Walk traverses an AST in depth-first order: It starts by calling fn(node);
11// node must not be nil. If fn returns true, Walk invokes fn recursively for
12// each of the non-nil children of node, followed by a call of fn(nil). The
13// returned node of fn can be used to rewrite the passed node to fn.
14func Walk(node Node, fn WalkFunc) Node {
15 rewritten, ok := fn(node)
16 if !ok {
17 return rewritten
18 }
19
20 switch n := node.(type) {
21 case *File:
22 n.Node = Walk(n.Node, fn)
23 case *ObjectList:
24 for i, item := range n.Items {
25 n.Items[i] = Walk(item, fn).(*ObjectItem)
26 }
27 case *ObjectKey:
28 // nothing to do
29 case *ObjectItem:
30 for i, k := range n.Keys {
31 n.Keys[i] = Walk(k, fn).(*ObjectKey)
32 }
33
34 if n.Val != nil {
35 n.Val = Walk(n.Val, fn)
36 }
37 case *LiteralType:
38 // nothing to do
39 case *ListType:
40 for i, l := range n.List {
41 n.List[i] = Walk(l, fn)
42 }
43 case *ObjectType:
44 n.List = Walk(n.List, fn).(*ObjectList)
45 default:
46 // should we panic here?
47 fmt.Printf("unknown type: %T\n", n)
48 }
49
50 fn(nil)
51 return rewritten
52}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/error.go b/vendor/github.com/hashicorp/hcl/hcl/parser/error.go
new file mode 100644
index 0000000..5c99381
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/parser/error.go
@@ -0,0 +1,17 @@
1package parser
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/hcl/hcl/token"
7)
8
9// PosError is a parse error that contains a position.
10type PosError struct {
11 Pos token.Pos
12 Err error
13}
14
15func (e *PosError) Error() string {
16 return fmt.Sprintf("At %s: %s", e.Pos, e.Err)
17}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go b/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go
new file mode 100644
index 0000000..b488180
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go
@@ -0,0 +1,520 @@
1// Package parser implements a parser for HCL (HashiCorp Configuration
2// Language)
3package parser
4
5import (
6 "bytes"
7 "errors"
8 "fmt"
9 "strings"
10
11 "github.com/hashicorp/hcl/hcl/ast"
12 "github.com/hashicorp/hcl/hcl/scanner"
13 "github.com/hashicorp/hcl/hcl/token"
14)
15
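// Parser parses HCL source, read through an internal scanner, into an AST.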
16type Parser struct {
17 sc *scanner.Scanner
18
19 // Last read token
20 tok token.Token
21 commaPrev token.Token
22
23 comments []*ast.CommentGroup
24 leadComment *ast.CommentGroup // last lead comment
25 lineComment *ast.CommentGroup // last line comment
26
27 enableTrace bool
28 indent int
29 n int // buffer size (max = 1)
30}
31
32func newParser(src []byte) *Parser {
33 return &Parser{
34 sc: scanner.New(src),
35 }
36}
37
38// Parse parses the given source and returns the abstract syntax tree.
39func Parse(src []byte) (*ast.File, error) {
40 // normalize all line endings
41 // since the scanner and output only work with "\n" line endings, we may
42 // end up with dangling "\r" characters in the parsed data.
43 src = bytes.Replace(src, []byte("\r\n"), []byte("\n"), -1)
44
45 p := newParser(src)
46 return p.Parse()
47}
48
49var errEofToken = errors.New("EOF token found")
50
51// Parse parses the scanned source and returns the abstract syntax tree.
52func (p *Parser) Parse() (*ast.File, error) {
53 f := &ast.File{}
54 var err, scerr error
55 p.sc.Error = func(pos token.Pos, msg string) {
56 scerr = &PosError{Pos: pos, Err: errors.New(msg)}
57 }
58
59 f.Node, err = p.objectList(false)
60 if scerr != nil {
61 return nil, scerr
62 }
63 if err != nil {
64 return nil, err
65 }
66
67 f.Comments = p.comments
68 return f, nil
69}
70
71// objectList parses a list of items within an object (generally k/v pairs).
72// The parameter "obj" tells us whether we are within an object (braces:
73// '{', '}') or just at the top level. If we're within an object, we end
74// at an RBRACE.
75func (p *Parser) objectList(obj bool) (*ast.ObjectList, error) {
76 defer un(trace(p, "ParseObjectList"))
77 node := &ast.ObjectList{}
78
79 for {
80 if obj {
81 tok := p.scan()
82 p.unscan()
83 if tok.Type == token.RBRACE {
84 break
85 }
86 }
87
88 n, err := p.objectItem()
89 if err == errEofToken {
90 break // we are finished
91 }
92
93		// we don't return a nil node, because the caller might want to use
94		// the already collected items.
95 if err != nil {
96 return node, err
97 }
98
99 node.Add(n)
100
101 // object lists can be optionally comma-delimited e.g. when a list of maps
102 // is being expressed, so a comma is allowed here - it's simply consumed
103 tok := p.scan()
104 if tok.Type != token.COMMA {
105 p.unscan()
106 }
107 }
108 return node, nil
109}
110
111func (p *Parser) consumeComment() (comment *ast.Comment, endline int) {
112 endline = p.tok.Pos.Line
113
114	// count the endline if it's a multiline comment, i.e. starting with /*
115 if len(p.tok.Text) > 1 && p.tok.Text[1] == '*' {
116 // don't use range here - no need to decode Unicode code points
117 for i := 0; i < len(p.tok.Text); i++ {
118 if p.tok.Text[i] == '\n' {
119 endline++
120 }
121 }
122 }
123
124 comment = &ast.Comment{Start: p.tok.Pos, Text: p.tok.Text}
125 p.tok = p.sc.Scan()
126 return
127}
128
129func (p *Parser) consumeCommentGroup(n int) (comments *ast.CommentGroup, endline int) {
130 var list []*ast.Comment
131 endline = p.tok.Pos.Line
132
133 for p.tok.Type == token.COMMENT && p.tok.Pos.Line <= endline+n {
134 var comment *ast.Comment
135 comment, endline = p.consumeComment()
136 list = append(list, comment)
137 }
138
139 // add comment group to the comments list
140 comments = &ast.CommentGroup{List: list}
141 p.comments = append(p.comments, comments)
142
143 return
144}
145
146// objectItem parses a single object item
147func (p *Parser) objectItem() (*ast.ObjectItem, error) {
148 defer un(trace(p, "ParseObjectItem"))
149
150 keys, err := p.objectKey()
151 if len(keys) > 0 && err == errEofToken {
152 // We ignore eof token here since it is an error if we didn't
153 // receive a value (but we did receive a key) for the item.
154 err = nil
155 }
156 if len(keys) > 0 && err != nil && p.tok.Type == token.RBRACE {
157 // This is a strange boolean statement, but what it means is:
158 // We have keys with no value, and we're likely in an object
159 // (since RBrace ends an object). For this, we set err to nil so
160 // we continue and get the error below of having the wrong value
161 // type.
162 err = nil
163
164 // Reset the token type so we don't think it completed fine. See
165 // objectType which uses p.tok.Type to check if we're done with
166 // the object.
167 p.tok.Type = token.EOF
168 }
169 if err != nil {
170 return nil, err
171 }
172
173 o := &ast.ObjectItem{
174 Keys: keys,
175 }
176
177 if p.leadComment != nil {
178 o.LeadComment = p.leadComment
179 p.leadComment = nil
180 }
181
182 switch p.tok.Type {
183 case token.ASSIGN:
184 o.Assign = p.tok.Pos
185 o.Val, err = p.object()
186 if err != nil {
187 return nil, err
188 }
189 case token.LBRACE:
190 o.Val, err = p.objectType()
191 if err != nil {
192 return nil, err
193 }
194 default:
195 keyStr := make([]string, 0, len(keys))
196 for _, k := range keys {
197 keyStr = append(keyStr, k.Token.Text)
198 }
199
200 return nil, fmt.Errorf(
201 "key '%s' expected start of object ('{') or assignment ('=')",
202 strings.Join(keyStr, " "))
203 }
204
205 // do a look-ahead for line comment
206 p.scan()
207 if len(keys) > 0 && o.Val.Pos().Line == keys[0].Pos().Line && p.lineComment != nil {
208 o.LineComment = p.lineComment
209 p.lineComment = nil
210 }
211 p.unscan()
212 return o, nil
213}
214
215// objectKey parses an object key and returns a ObjectKey AST
216func (p *Parser) objectKey() ([]*ast.ObjectKey, error) {
217 keyCount := 0
218 keys := make([]*ast.ObjectKey, 0)
219
220 for {
221 tok := p.scan()
222 switch tok.Type {
223 case token.EOF:
224 // It is very important to also return the keys here as well as
225 // the error. This is because we need to be able to tell if we
226 // did parse keys prior to finding the EOF, or if we just found
227 // a bare EOF.
228 return keys, errEofToken
229 case token.ASSIGN:
230 // assignment or object only, but not nested objects. this is not
231 // allowed: `foo bar = {}`
232 if keyCount > 1 {
233 return nil, &PosError{
234 Pos: p.tok.Pos,
235 Err: fmt.Errorf("nested object expected: LBRACE got: %s", p.tok.Type),
236 }
237 }
238
239 if keyCount == 0 {
240 return nil, &PosError{
241 Pos: p.tok.Pos,
242 Err: errors.New("no object keys found!"),
243 }
244 }
245
246 return keys, nil
247 case token.LBRACE:
248 var err error
249
250 // If we have no keys, then it is a syntax error. i.e. {{}} is not
251 // allowed.
252 if len(keys) == 0 {
253 err = &PosError{
254 Pos: p.tok.Pos,
255 Err: fmt.Errorf("expected: IDENT | STRING got: %s", p.tok.Type),
256 }
257 }
258
259 // object
260 return keys, err
261 case token.IDENT, token.STRING:
262 keyCount++
263 keys = append(keys, &ast.ObjectKey{Token: p.tok})
264 case token.ILLEGAL:
265 return keys, &PosError{
266 Pos: p.tok.Pos,
267 Err: fmt.Errorf("illegal character"),
268 }
269 default:
270 return keys, &PosError{
271 Pos: p.tok.Pos,
272 Err: fmt.Errorf("expected: IDENT | STRING | ASSIGN | LBRACE got: %s", p.tok.Type),
273 }
274 }
275 }
276}
277
278// object parses any type of object, such as number, bool, string, object or
279// list.
280func (p *Parser) object() (ast.Node, error) {
281 defer un(trace(p, "ParseType"))
282 tok := p.scan()
283
284 switch tok.Type {
285 case token.NUMBER, token.FLOAT, token.BOOL, token.STRING, token.HEREDOC:
286 return p.literalType()
287 case token.LBRACE:
288 return p.objectType()
289 case token.LBRACK:
290 return p.listType()
291 case token.COMMENT:
292 // implement comment
293 case token.EOF:
294 return nil, errEofToken
295 }
296
297 return nil, &PosError{
298 Pos: tok.Pos,
299 Err: fmt.Errorf("Unknown token: %+v", tok),
300 }
301}
302
303// objectType parses an object type and returns a ObjectType AST
304func (p *Parser) objectType() (*ast.ObjectType, error) {
305 defer un(trace(p, "ParseObjectType"))
306
307 // we assume that the currently scanned token is a LBRACE
308 o := &ast.ObjectType{
309 Lbrace: p.tok.Pos,
310 }
311
312 l, err := p.objectList(true)
313
314	// If we hit RBRACE, we are good to go (it means we parsed all items); if
315	// it's not an RBRACE, it's a syntax error and we just return it.
316 if err != nil && p.tok.Type != token.RBRACE {
317 return nil, err
318 }
319
320 // No error, scan and expect the ending to be a brace
321 if tok := p.scan(); tok.Type != token.RBRACE {
322 return nil, fmt.Errorf("object expected closing RBRACE got: %s", tok.Type)
323 }
324
325 o.List = l
326 o.Rbrace = p.tok.Pos // advanced via parseObjectList
327 return o, nil
328}
329
330// listType parses a list type and returns a ListType AST
331func (p *Parser) listType() (*ast.ListType, error) {
332 defer un(trace(p, "ParseListType"))
333
334 // we assume that the currently scanned token is a LBRACK
335 l := &ast.ListType{
336 Lbrack: p.tok.Pos,
337 }
338
339 needComma := false
340 for {
341 tok := p.scan()
342 if needComma {
343 switch tok.Type {
344 case token.COMMA, token.RBRACK:
345 default:
346 return nil, &PosError{
347 Pos: tok.Pos,
348 Err: fmt.Errorf(
349 "error parsing list, expected comma or list end, got: %s",
350 tok.Type),
351 }
352 }
353 }
354 switch tok.Type {
355 case token.BOOL, token.NUMBER, token.FLOAT, token.STRING, token.HEREDOC:
356 node, err := p.literalType()
357 if err != nil {
358 return nil, err
359 }
360
361 // If there is a lead comment, apply it
362 if p.leadComment != nil {
363 node.LeadComment = p.leadComment
364 p.leadComment = nil
365 }
366
367 l.Add(node)
368 needComma = true
369 case token.COMMA:
370 // get next list item or we are at the end
371 // do a look-ahead for line comment
372 p.scan()
373 if p.lineComment != nil && len(l.List) > 0 {
374 lit, ok := l.List[len(l.List)-1].(*ast.LiteralType)
375 if ok {
376 lit.LineComment = p.lineComment
377 l.List[len(l.List)-1] = lit
378 p.lineComment = nil
379 }
380 }
381 p.unscan()
382
383 needComma = false
384 continue
385 case token.LBRACE:
386 // Looks like a nested object, so parse it out
387 node, err := p.objectType()
388 if err != nil {
389 return nil, &PosError{
390 Pos: tok.Pos,
391 Err: fmt.Errorf(
392 "error while trying to parse object within list: %s", err),
393 }
394 }
395 l.Add(node)
396 needComma = true
397 case token.LBRACK:
398 node, err := p.listType()
399 if err != nil {
400 return nil, &PosError{
401 Pos: tok.Pos,
402 Err: fmt.Errorf(
403 "error while trying to parse list within list: %s", err),
404 }
405 }
406 l.Add(node)
407 case token.RBRACK:
408 // finished
409 l.Rbrack = p.tok.Pos
410 return l, nil
411 default:
412 return nil, &PosError{
413 Pos: tok.Pos,
414 Err: fmt.Errorf("unexpected token while parsing list: %s", tok.Type),
415 }
416 }
417 }
418}
419
420// literalType parses a literal type and returns a LiteralType AST
421func (p *Parser) literalType() (*ast.LiteralType, error) {
422 defer un(trace(p, "ParseLiteral"))
423
424 return &ast.LiteralType{
425 Token: p.tok,
426 }, nil
427}
428
429// scan returns the next token from the underlying scanner. If a token has
430// been unscanned then read that instead. In the process, it collects any
431// comment groups encountered, and remembers the last lead and line comments.
432func (p *Parser) scan() token.Token {
433 // If we have a token on the buffer, then return it.
434 if p.n != 0 {
435 p.n = 0
436 return p.tok
437 }
438
439	// Otherwise read the next token from the scanner and save it to the buffer
440 // in case we unscan later.
441 prev := p.tok
442 p.tok = p.sc.Scan()
443
444 if p.tok.Type == token.COMMENT {
445 var comment *ast.CommentGroup
446 var endline int
447
448 // fmt.Printf("p.tok.Pos.Line = %+v prev: %d endline %d \n",
449 // p.tok.Pos.Line, prev.Pos.Line, endline)
450 if p.tok.Pos.Line == prev.Pos.Line {
451 // The comment is on same line as the previous token; it
452 // cannot be a lead comment but may be a line comment.
453 comment, endline = p.consumeCommentGroup(0)
454 if p.tok.Pos.Line != endline {
455 // The next token is on a different line, thus
456 // the last comment group is a line comment.
457 p.lineComment = comment
458 }
459 }
460
461 // consume successor comments, if any
462 endline = -1
463 for p.tok.Type == token.COMMENT {
464 comment, endline = p.consumeCommentGroup(1)
465 }
466
467 if endline+1 == p.tok.Pos.Line && p.tok.Type != token.RBRACE {
468 switch p.tok.Type {
469 case token.RBRACE, token.RBRACK:
470 // Do not count for these cases
471 default:
472 // The next token is following on the line immediately after the
473 // comment group, thus the last comment group is a lead comment.
474 p.leadComment = comment
475 }
476 }
477
478 }
479
480 return p.tok
481}
482
483// unscan pushes the previously read token back onto the buffer.
484func (p *Parser) unscan() {
485 p.n = 1
486}
487
488// ----------------------------------------------------------------------------
489// Parsing support
490
491func (p *Parser) printTrace(a ...interface{}) {
492 if !p.enableTrace {
493 return
494 }
495
496 const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . "
497 const n = len(dots)
498 fmt.Printf("%5d:%3d: ", p.tok.Pos.Line, p.tok.Pos.Column)
499
500 i := 2 * p.indent
501 for i > n {
502 fmt.Print(dots)
503 i -= n
504 }
505 // i <= n
506 fmt.Print(dots[0:i])
507 fmt.Println(a...)
508}
509
510func trace(p *Parser, msg string) *Parser {
511 p.printTrace(msg, "(")
512 p.indent++
513 return p
514}
515
516// Usage pattern: defer un(trace(p, "..."))
517func un(p *Parser) {
518 p.indent--
519 p.printTrace(")")
520}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go b/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go
new file mode 100644
index 0000000..6966236
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go
@@ -0,0 +1,651 @@
1// Package scanner implements a scanner for HCL (HashiCorp Configuration
2// Language) source text.
3package scanner
4
5import (
6 "bytes"
7 "fmt"
8 "os"
9 "regexp"
10 "unicode"
11 "unicode/utf8"
12
13 "github.com/hashicorp/hcl/hcl/token"
14)
15
16// eof represents a marker rune for the end of the reader.
17const eof = rune(0)
18
19// Scanner defines a lexical scanner
20type Scanner struct {
21 buf *bytes.Buffer // Source buffer for advancing and scanning
22 src []byte // Source buffer for immutable access
23
24 // Source Position
25 srcPos token.Pos // current position
26 prevPos token.Pos // previous position, used for peek() method
27
28 lastCharLen int // length of last character in bytes
29 lastLineLen int // length of last line in characters (for correct column reporting)
30
31 tokStart int // token text start position
32 tokEnd int // token text end position
33
34 // Error is called for each error encountered. If no Error
35 // function is set, the error is reported to os.Stderr.
36 Error func(pos token.Pos, msg string)
37
38 // ErrorCount is incremented by one for each error encountered.
39 ErrorCount int
40
41 // tokPos is the start position of most recently scanned token; set by
42 // Scan. The Filename field is always left untouched by the Scanner. If
43 // an error is reported (via Error) and Position is invalid, the scanner is
44 // not inside a token.
45 tokPos token.Pos
46}
47
48// New creates and initializes a new instance of Scanner using src as
49// its source content.
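//
// A typical scan loop (illustrative):
//
//	s := scanner.New(src)
//	for tok := s.Scan(); tok.Type != token.EOF; tok = s.Scan() {
//		// use tok
//	}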
50func New(src []byte) *Scanner {
51 // even though we accept a src, we read from a io.Reader compatible type
52 // (*bytes.Buffer). So in the future we might easily change it to streaming
53 // read.
54 b := bytes.NewBuffer(src)
55 s := &Scanner{
56 buf: b,
57 src: src,
58 }
59
60 // srcPosition always starts with 1
61 s.srcPos.Line = 1
62 return s
63}
64
65// next reads the next rune from the buffered reader. It returns rune(0) if
66// an error occurs (or io.EOF is returned).
67func (s *Scanner) next() rune {
68 ch, size, err := s.buf.ReadRune()
69 if err != nil {
70 // advance for error reporting
71 s.srcPos.Column++
72 s.srcPos.Offset += size
73 s.lastCharLen = size
74 return eof
75 }
76
77 if ch == utf8.RuneError && size == 1 {
78 s.srcPos.Column++
79 s.srcPos.Offset += size
80 s.lastCharLen = size
81 s.err("illegal UTF-8 encoding")
82 return ch
83 }
84
85 // remember last position
86 s.prevPos = s.srcPos
87
88 s.srcPos.Column++
89 s.lastCharLen = size
90 s.srcPos.Offset += size
91
92 if ch == '\n' {
93 s.srcPos.Line++
94 s.lastLineLen = s.srcPos.Column
95 s.srcPos.Column = 0
96 }
97
98 // If we see a null character with data left, then that is an error
99 if ch == '\x00' && s.buf.Len() > 0 {
100 s.err("unexpected null character (0x00)")
101 return eof
102 }
103
104 // debug
105 // fmt.Printf("ch: %q, offset:column: %d:%d\n", ch, s.srcPos.Offset, s.srcPos.Column)
106 return ch
107}
108
109// unread unreads the previous read Rune and updates the source position
110func (s *Scanner) unread() {
111 if err := s.buf.UnreadRune(); err != nil {
112 panic(err) // this is user fault, we should catch it
113 }
114 s.srcPos = s.prevPos // put back last position
115}
116
117// peek returns the next rune without advancing the reader.
118func (s *Scanner) peek() rune {
119 peek, _, err := s.buf.ReadRune()
120 if err != nil {
121 return eof
122 }
123
124 s.buf.UnreadRune()
125 return peek
126}
127
128// Scan scans the next token and returns the token.
129func (s *Scanner) Scan() token.Token {
130 ch := s.next()
131
132 // skip white space
133 for isWhitespace(ch) {
134 ch = s.next()
135 }
136
137 var tok token.Type
138
139 // token text markings
140 s.tokStart = s.srcPos.Offset - s.lastCharLen
141
142 // token position, initial next() is moving the offset by one(size of rune
143 // actually), though we are interested with the starting point
144 s.tokPos.Offset = s.srcPos.Offset - s.lastCharLen
145 if s.srcPos.Column > 0 {
146 // common case: last character was not a '\n'
147 s.tokPos.Line = s.srcPos.Line
148 s.tokPos.Column = s.srcPos.Column
149 } else {
150 // last character was a '\n'
151 // (we cannot be at the beginning of the source
152 // since we have called next() at least once)
153 s.tokPos.Line = s.srcPos.Line - 1
154 s.tokPos.Column = s.lastLineLen
155 }
156
157 switch {
158 case isLetter(ch):
159 tok = token.IDENT
160 lit := s.scanIdentifier()
161 if lit == "true" || lit == "false" {
162 tok = token.BOOL
163 }
164 case isDecimal(ch):
165 tok = s.scanNumber(ch)
166 default:
167 switch ch {
168 case eof:
169 tok = token.EOF
170 case '"':
171 tok = token.STRING
172 s.scanString()
173 case '#', '/':
174 tok = token.COMMENT
175 s.scanComment(ch)
176 case '.':
177 tok = token.PERIOD
178 ch = s.peek()
179 if isDecimal(ch) {
180 tok = token.FLOAT
181 ch = s.scanMantissa(ch)
182 ch = s.scanExponent(ch)
183 }
184 case '<':
185 tok = token.HEREDOC
186 s.scanHeredoc()
187 case '[':
188 tok = token.LBRACK
189 case ']':
190 tok = token.RBRACK
191 case '{':
192 tok = token.LBRACE
193 case '}':
194 tok = token.RBRACE
195 case ',':
196 tok = token.COMMA
197 case '=':
198 tok = token.ASSIGN
199 case '+':
200 tok = token.ADD
201 case '-':
202 if isDecimal(s.peek()) {
203 ch := s.next()
204 tok = s.scanNumber(ch)
205 } else {
206 tok = token.SUB
207 }
208 default:
209 s.err("illegal char")
210 }
211 }
212
213 // finish token ending
214 s.tokEnd = s.srcPos.Offset
215
216 // create token literal
217 var tokenText string
218 if s.tokStart >= 0 {
219 tokenText = string(s.src[s.tokStart:s.tokEnd])
220 }
221 s.tokStart = s.tokEnd // ensure idempotency of tokenText() call
222
223 return token.Token{
224 Type: tok,
225 Pos: s.tokPos,
226 Text: tokenText,
227 }
228}
229
230func (s *Scanner) scanComment(ch rune) {
231 // single line comments
232 if ch == '#' || (ch == '/' && s.peek() != '*') {
233 if ch == '/' && s.peek() != '/' {
234 s.err("expected '/' for comment")
235 return
236 }
237
238 ch = s.next()
239 for ch != '\n' && ch >= 0 && ch != eof {
240 ch = s.next()
241 }
242 if ch != eof && ch >= 0 {
243 s.unread()
244 }
245 return
246 }
247
248	// be sure we get the character after /*. This allows us to find comments
249	// that are not terminated.
250 if ch == '/' {
251 s.next()
252 ch = s.next() // read character after "/*"
253 }
254
255 // look for /* - style comments
256 for {
257 if ch < 0 || ch == eof {
258 s.err("comment not terminated")
259 break
260 }
261
262 ch0 := ch
263 ch = s.next()
264 if ch0 == '*' && ch == '/' {
265 break
266 }
267 }
268}
269
270// scanNumber scans a HCL number definition starting with the given rune
271func (s *Scanner) scanNumber(ch rune) token.Type {
272 if ch == '0' {
273 // check for hexadecimal, octal or float
274 ch = s.next()
275 if ch == 'x' || ch == 'X' {
276 // hexadecimal
277 ch = s.next()
278 found := false
279 for isHexadecimal(ch) {
280 ch = s.next()
281 found = true
282 }
283
284 if !found {
285 s.err("illegal hexadecimal number")
286 }
287
288 if ch != eof {
289 s.unread()
290 }
291
292 return token.NUMBER
293 }
294
295 // now it's either something like: 0421(octal) or 0.1231(float)
296 illegalOctal := false
297 for isDecimal(ch) {
298 ch = s.next()
299 if ch == '8' || ch == '9' {
300 // this is just a possibility. For example 0159 is illegal, but
301 // 0159.23 is valid. So we mark a possible illegal octal. If
302 // the next character is not a period, we'll print the error.
303 illegalOctal = true
304 }
305 }
306
307 if ch == 'e' || ch == 'E' {
308 ch = s.scanExponent(ch)
309 return token.FLOAT
310 }
311
312 if ch == '.' {
313 ch = s.scanFraction(ch)
314
315 if ch == 'e' || ch == 'E' {
316 ch = s.next()
317 ch = s.scanExponent(ch)
318 }
319 return token.FLOAT
320 }
321
322 if illegalOctal {
323 s.err("illegal octal number")
324 }
325
326 if ch != eof {
327 s.unread()
328 }
329 return token.NUMBER
330 }
331
332 s.scanMantissa(ch)
333 ch = s.next() // seek forward
334 if ch == 'e' || ch == 'E' {
335 ch = s.scanExponent(ch)
336 return token.FLOAT
337 }
338
339 if ch == '.' {
340 ch = s.scanFraction(ch)
341 if ch == 'e' || ch == 'E' {
342 ch = s.next()
343 ch = s.scanExponent(ch)
344 }
345 return token.FLOAT
346 }
347
348 if ch != eof {
349 s.unread()
350 }
351 return token.NUMBER
352}
353
354// scanMantissa scans the mantissa beginning from the rune. It returns the next
355// non-decimal rune. It's used to determine whether it's a fraction or exponent.
356func (s *Scanner) scanMantissa(ch rune) rune {
357 scanned := false
358 for isDecimal(ch) {
359 ch = s.next()
360 scanned = true
361 }
362
363 if scanned && ch != eof {
364 s.unread()
365 }
366 return ch
367}
368
369// scanFraction scans the fraction after the '.' rune
370func (s *Scanner) scanFraction(ch rune) rune {
371 if ch == '.' {
372 ch = s.peek() // we peek just to see if we can move forward
373 ch = s.scanMantissa(ch)
374 }
375 return ch
376}
377
378// scanExponent scans the remaining parts of an exponent after the 'e' or 'E'
379// rune.
380func (s *Scanner) scanExponent(ch rune) rune {
381 if ch == 'e' || ch == 'E' {
382 ch = s.next()
383 if ch == '-' || ch == '+' {
384 ch = s.next()
385 }
386 ch = s.scanMantissa(ch)
387 }
388 return ch
389}
390
391// scanHeredoc scans a heredoc string
392func (s *Scanner) scanHeredoc() {
393 // Scan the second '<' in example: '<<EOF'
394 if s.next() != '<' {
395 s.err("heredoc expected second '<', didn't see it")
396 return
397 }
398
399 // Get the original offset so we can read just the heredoc ident
400 offs := s.srcPos.Offset
401
402 // Scan the identifier
403 ch := s.next()
404
405 // Indented heredoc syntax
406 if ch == '-' {
407 ch = s.next()
408 }
409
410 for isLetter(ch) || isDigit(ch) {
411 ch = s.next()
412 }
413
414 // If we reached an EOF then that is not good
415 if ch == eof {
416 s.err("heredoc not terminated")
417 return
418 }
419
420 // Ignore the '\r' in Windows line endings
421 if ch == '\r' {
422 if s.peek() == '\n' {
423 ch = s.next()
424 }
425 }
426
427 // If we didn't reach a newline then that is also not good
428 if ch != '\n' {
429 s.err("invalid characters in heredoc anchor")
430 return
431 }
432
433 // Read the identifier
434 identBytes := s.src[offs : s.srcPos.Offset-s.lastCharLen]
435 if len(identBytes) == 0 {
436 s.err("zero-length heredoc anchor")
437 return
438 }
439
440 var identRegexp *regexp.Regexp
441 if identBytes[0] == '-' {
442 identRegexp = regexp.MustCompile(fmt.Sprintf(`[[:space:]]*%s\z`, identBytes[1:]))
443 } else {
444 identRegexp = regexp.MustCompile(fmt.Sprintf(`[[:space:]]*%s\z`, identBytes))
445 }
446
447 // Read the actual string value
448 lineStart := s.srcPos.Offset
449 for {
450 ch := s.next()
451
452 // Special newline handling.
453 if ch == '\n' {
454 // Math is fast, so we first compare the byte counts to see if we have a chance
455 // of seeing the same identifier - if the length is less than the number of bytes
456 // in the identifier, this cannot be a valid terminator.
457 lineBytesLen := s.srcPos.Offset - s.lastCharLen - lineStart
458 if lineBytesLen >= len(identBytes) && identRegexp.Match(s.src[lineStart:s.srcPos.Offset-s.lastCharLen]) {
459 break
460 }
461
462 // Not an anchor match, record the start of a new line
463 lineStart = s.srcPos.Offset
464 }
465
466 if ch == eof {
467 s.err("heredoc not terminated")
468 return
469 }
470 }
471
472 return
473}
474
475// scanString scans a quoted string
476func (s *Scanner) scanString() {
477 braces := 0
478 for {
479 // '"' opening already consumed
480 // read character after quote
481 ch := s.next()
482
483 if (ch == '\n' && braces == 0) || ch < 0 || ch == eof {
484 s.err("literal not terminated")
485 return
486 }
487
488 if ch == '"' && braces == 0 {
489 break
490 }
491
492 // If we're going into a ${} then we can ignore quotes for awhile
493 if braces == 0 && ch == '$' && s.peek() == '{' {
494 braces++
495 s.next()
496 } else if braces > 0 && ch == '{' {
497 braces++
498 }
499 if braces > 0 && ch == '}' {
500 braces--
501 }
502
503 if ch == '\\' {
504 s.scanEscape()
505 }
506 }
507
508 return
509}
510
511// scanEscape scans an escape sequence
512func (s *Scanner) scanEscape() rune {
513 // http://en.cppreference.com/w/cpp/language/escape
514 ch := s.next() // read character after '/'
515 switch ch {
516 case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '"':
517 // nothing to do
518 case '0', '1', '2', '3', '4', '5', '6', '7':
519 // octal notation
520 ch = s.scanDigits(ch, 8, 3)
521 case 'x':
522		// hexadecimal notation
523 ch = s.scanDigits(s.next(), 16, 2)
524 case 'u':
525 // universal character name
526 ch = s.scanDigits(s.next(), 16, 4)
527 case 'U':
528 // universal character name
529 ch = s.scanDigits(s.next(), 16, 8)
530 default:
531 s.err("illegal char escape")
532 }
533 return ch
534}
535
536// scanDigits scans a rune with the given base up to n times. For example an
537// octal escape such as \123 would result in a scanDigits(ch, 8, 3) call
538func (s *Scanner) scanDigits(ch rune, base, n int) rune {
539 start := n
540 for n > 0 && digitVal(ch) < base {
541 ch = s.next()
542 if ch == eof {
543 // If we see an EOF, we halt any more scanning of digits
544 // immediately.
545 break
546 }
547
548 n--
549 }
550 if n > 0 {
551 s.err("illegal char escape")
552 }
553
554 if n != start {
555 // we scanned all the digits; put the last non-digit char back,
556 // but only if we read anything at all
557 s.unread()
558 }
559
560 return ch
561}
562
563// scanIdentifier scans an identifier and returns the literal string
564func (s *Scanner) scanIdentifier() string {
565 offs := s.srcPos.Offset - s.lastCharLen
566 ch := s.next()
567 for isLetter(ch) || isDigit(ch) || ch == '-' || ch == '.' {
568 ch = s.next()
569 }
570
571 if ch != eof {
572 s.unread() // we got identifier, put back latest char
573 }
574
575 return string(s.src[offs:s.srcPos.Offset])
576}
577
578// recentPosition returns the position of the character immediately after the
579// character or token returned by the last call to Scan.
580func (s *Scanner) recentPosition() (pos token.Pos) {
581 pos.Offset = s.srcPos.Offset - s.lastCharLen
582 switch {
583 case s.srcPos.Column > 0:
584 // common case: last character was not a '\n'
585 pos.Line = s.srcPos.Line
586 pos.Column = s.srcPos.Column
587 case s.lastLineLen > 0:
588 // last character was a '\n'
589 // (we cannot be at the beginning of the source
590 // since we have called next() at least once)
591 pos.Line = s.srcPos.Line - 1
592 pos.Column = s.lastLineLen
593 default:
594 // at the beginning of the source
595 pos.Line = 1
596 pos.Column = 1
597 }
598 return
599}
600
601// err reports a scanning error to the s.Error function. If no Error
602// function is set, the error is printed to os.Stderr
603func (s *Scanner) err(msg string) {
604 s.ErrorCount++
605 pos := s.recentPosition()
606
607 if s.Error != nil {
608 s.Error(pos, msg)
609 return
610 }
611
612 fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg)
613}
614
615// isLetter returns true if the given rune is a letter
616func isLetter(ch rune) bool {
617 return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch)
618}
619
620// isDigit returns true if the given rune is a decimal digit
621func isDigit(ch rune) bool {
622 return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch)
623}
624
625// isDecimal returns true if the given rune is a decimal digit
626func isDecimal(ch rune) bool {
627 return '0' <= ch && ch <= '9'
628}
629
630// isHexadecimal returns true if the given rune is a hexadecimal digit
631func isHexadecimal(ch rune) bool {
632 return '0' <= ch && ch <= '9' || 'a' <= ch && ch <= 'f' || 'A' <= ch && ch <= 'F'
633}
634
635// isWhitespace returns true if the rune is a space, tab, newline or carriage return
636func isWhitespace(ch rune) bool {
637 return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r'
638}
639
640// digitVal returns the integer value of a given octal, decimal, or hexadecimal rune
641func digitVal(ch rune) int {
642 switch {
643 case '0' <= ch && ch <= '9':
644 return int(ch - '0')
645 case 'a' <= ch && ch <= 'f':
646 return int(ch - 'a' + 10)
647 case 'A' <= ch && ch <= 'F':
648 return int(ch - 'A' + 10)
649 }
650 return 16 // larger than any legal digit val
651}
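
For reference, a minimal usage sketch of the scanner above. The import paths assume this file is the HCL-mode scanner at github.com/hashicorp/hcl/hcl/scanner (the heredoc and ${} handling only exist in that variant); the source snippet is a hypothetical example.

package main

import (
	"fmt"

	"github.com/hashicorp/hcl/hcl/scanner"
	"github.com/hashicorp/hcl/hcl/token"
)

func main() {
	// Exercises plain strings, ${} interpolation, and a heredoc.
	src := []byte("greeting = \"hello ${var.name}\"\ndoc = <<EOT\nline one\nEOT\n")

	s := scanner.New(src)
	for {
		tok := s.Scan()
		if tok.Type == token.EOF {
			break
		}
		fmt.Printf("%-6s %-8s %q\n", tok.Pos, tok.Type, tok.Text)
	}
}
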
diff --git a/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go b/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go
new file mode 100644
index 0000000..5f981ea
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go
@@ -0,0 +1,241 @@
1package strconv
2
3import (
4 "errors"
5 "unicode/utf8"
6)
7
8// ErrSyntax indicates that a value does not have the right syntax for the target type.
9var ErrSyntax = errors.New("invalid syntax")
10
11// Unquote interprets s as a double-quoted HCL string literal,
12// returning the string value that s quotes. Escape sequences are
13// expanded, except inside interpolation sequences (${ ... }), whose
14// contents are passed through verbatim. Unlike Go's strconv.Unquote,
15// single-quoted and backquoted forms are rejected.
16func Unquote(s string) (t string, err error) {
17 n := len(s)
18 if n < 2 {
19 return "", ErrSyntax
20 }
21 quote := s[0]
22 if quote != s[n-1] {
23 return "", ErrSyntax
24 }
25 s = s[1 : n-1]
26
27 if quote != '"' {
28 return "", ErrSyntax
29 }
30 if !contains(s, '$') && !contains(s, '{') && contains(s, '\n') {
31 return "", ErrSyntax
32 }
33
34 // Is it trivial? Avoid allocation.
35 if !contains(s, '\\') && !contains(s, quote) && !contains(s, '$') {
36 switch quote {
37 case '"':
38 return s, nil
39 case '\'':
40 r, size := utf8.DecodeRuneInString(s)
41 if size == len(s) && (r != utf8.RuneError || size != 1) {
42 return s, nil
43 }
44 }
45 }
46
47 var runeTmp [utf8.UTFMax]byte
48 buf := make([]byte, 0, 3*len(s)/2) // Try to avoid more allocations.
49 for len(s) > 0 {
50 // If we're starting a '${}' then let it through un-unquoted.
51 // Specifically: we don't unquote any characters within the `${}`
52 // section.
53 if s[0] == '$' && len(s) > 1 && s[1] == '{' {
54 buf = append(buf, '$', '{')
55 s = s[2:]
56
57 // Continue reading until we find the closing brace, copying as-is
58 braces := 1
59 for len(s) > 0 && braces > 0 {
60 r, size := utf8.DecodeRuneInString(s)
61 if r == utf8.RuneError {
62 return "", ErrSyntax
63 }
64
65 s = s[size:]
66
67 n := utf8.EncodeRune(runeTmp[:], r)
68 buf = append(buf, runeTmp[:n]...)
69
70 switch r {
71 case '{':
72 braces++
73 case '}':
74 braces--
75 }
76 }
77 if braces != 0 {
78 return "", ErrSyntax
79 }
80 if len(s) == 0 {
81 // If there's no string left, we're done!
82 break
83 } else {
84 // If there's more left, we need to pop back up to the top of the loop
85 // in case there's another interpolation in this string.
86 continue
87 }
88 }
89
90 if s[0] == '\n' {
91 return "", ErrSyntax
92 }
93
94 c, multibyte, ss, err := unquoteChar(s, quote)
95 if err != nil {
96 return "", err
97 }
98 s = ss
99 if c < utf8.RuneSelf || !multibyte {
100 buf = append(buf, byte(c))
101 } else {
102 n := utf8.EncodeRune(runeTmp[:], c)
103 buf = append(buf, runeTmp[:n]...)
104 }
105 if quote == '\'' && len(s) != 0 {
106 // single-quoted must be single character
107 return "", ErrSyntax
108 }
109 }
110 return string(buf), nil
111}
112
113// contains reports whether the string contains the byte c.
114func contains(s string, c byte) bool {
115 for i := 0; i < len(s); i++ {
116 if s[i] == c {
117 return true
118 }
119 }
120 return false
121}
122
123func unhex(b byte) (v rune, ok bool) {
124 c := rune(b)
125 switch {
126 case '0' <= c && c <= '9':
127 return c - '0', true
128 case 'a' <= c && c <= 'f':
129 return c - 'a' + 10, true
130 case 'A' <= c && c <= 'F':
131 return c - 'A' + 10, true
132 }
133 return
134}
135
136func unquoteChar(s string, quote byte) (value rune, multibyte bool, tail string, err error) {
137 // easy cases
138 switch c := s[0]; {
139 case c == quote && (quote == '\'' || quote == '"'):
140 err = ErrSyntax
141 return
142 case c >= utf8.RuneSelf:
143 r, size := utf8.DecodeRuneInString(s)
144 return r, true, s[size:], nil
145 case c != '\\':
146 return rune(s[0]), false, s[1:], nil
147 }
148
149 // hard case: c is backslash
150 if len(s) <= 1 {
151 err = ErrSyntax
152 return
153 }
154 c := s[1]
155 s = s[2:]
156
157 switch c {
158 case 'a':
159 value = '\a'
160 case 'b':
161 value = '\b'
162 case 'f':
163 value = '\f'
164 case 'n':
165 value = '\n'
166 case 'r':
167 value = '\r'
168 case 't':
169 value = '\t'
170 case 'v':
171 value = '\v'
172 case 'x', 'u', 'U':
173 n := 0
174 switch c {
175 case 'x':
176 n = 2
177 case 'u':
178 n = 4
179 case 'U':
180 n = 8
181 }
182 var v rune
183 if len(s) < n {
184 err = ErrSyntax
185 return
186 }
187 for j := 0; j < n; j++ {
188 x, ok := unhex(s[j])
189 if !ok {
190 err = ErrSyntax
191 return
192 }
193 v = v<<4 | x
194 }
195 s = s[n:]
196 if c == 'x' {
197 // single-byte string, possibly not UTF-8
198 value = v
199 break
200 }
201 if v > utf8.MaxRune {
202 err = ErrSyntax
203 return
204 }
205 value = v
206 multibyte = true
207 case '0', '1', '2', '3', '4', '5', '6', '7':
208 v := rune(c) - '0'
209 if len(s) < 2 {
210 err = ErrSyntax
211 return
212 }
213 for j := 0; j < 2; j++ { // one digit already; two more
214 x := rune(s[j]) - '0'
215 if x < 0 || x > 7 {
216 err = ErrSyntax
217 return
218 }
219 v = (v << 3) | x
220 }
221 s = s[2:]
222 if v > 255 {
223 err = ErrSyntax
224 return
225 }
226 value = v
227 case '\\':
228 value = '\\'
229 case '\'', '"':
230 if c != quote {
231 err = ErrSyntax
232 return
233 }
234 value = rune(c)
235 default:
236 err = ErrSyntax
237 return
238 }
239 tail = s
240 return
241}
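
A short sketch of what distinguishes this Unquote from the standard library's: escape sequences outside ${} are expanded, while everything between the braces (including quotes and backslashes) is copied through verbatim so the interpolation can be parsed later. The import path is taken from the diff header above; the input string is a hypothetical example.

package main

import (
	"fmt"

	hclstrconv "github.com/hashicorp/hcl/hcl/strconv"
)

func main() {
	// \t is expanded to a real tab, but the quoted "x.txt" inside
	// ${...} survives untouched.
	s, err := hclstrconv.Unquote(`"a\tb ${file("x.txt")} c"`)
	if err != nil {
		fmt.Println("unquote error:", err)
		return
	}
	fmt.Printf("%q\n", s) // "a\tb ${file(\"x.txt\")} c"
}
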
diff --git a/vendor/github.com/hashicorp/hcl/hcl/token/position.go b/vendor/github.com/hashicorp/hcl/hcl/token/position.go
new file mode 100644
index 0000000..59c1bb7
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/token/position.go
@@ -0,0 +1,46 @@
1package token
2
3import "fmt"
4
5// Pos describes an arbitrary source position
6// including the file, line, and column location.
7// A Position is valid if the line number is > 0.
8type Pos struct {
9 Filename string // filename, if any
10 Offset int // offset, starting at 0
11 Line int // line number, starting at 1
12 Column int // column number, starting at 1 (character count)
13}
14
15// IsValid returns true if the position is valid.
16func (p *Pos) IsValid() bool { return p.Line > 0 }
17
18// String returns a string in one of several forms:
19//
20// file:line:column valid position with file name
21// line:column valid position without file name
22// file invalid position with file name
23// - invalid position without file name
24func (p Pos) String() string {
25 s := p.Filename
26 if p.IsValid() {
27 if s != "" {
28 s += ":"
29 }
30 s += fmt.Sprintf("%d:%d", p.Line, p.Column)
31 }
32 if s == "" {
33 s = "-"
34 }
35 return s
36}
37
38// Before reports whether the position p is before u.
39func (p Pos) Before(u Pos) bool {
40 return u.Offset > p.Offset || u.Line > p.Line
41}
42
43// After reports whether the position p is after u.
44func (p Pos) After(u Pos) bool {
45 return u.Offset < p.Offset || u.Line < p.Line
46}
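
A quick sketch of the String forms documented above (the json/token copy of Pos later in this diff behaves identically):

package main

import (
	"fmt"

	"github.com/hashicorp/hcl/hcl/token"
)

func main() {
	withFile := token.Pos{Filename: "main.tf", Offset: 42, Line: 3, Column: 7}
	noFile := token.Pos{Line: 3, Column: 7}
	var invalid token.Pos // zero value: Line == 0, so IsValid() is false

	fmt.Println(withFile) // main.tf:3:7
	fmt.Println(noFile)   // 3:7
	fmt.Println(invalid)  // -
}
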
diff --git a/vendor/github.com/hashicorp/hcl/hcl/token/token.go b/vendor/github.com/hashicorp/hcl/hcl/token/token.go
new file mode 100644
index 0000000..e37c066
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/token/token.go
@@ -0,0 +1,219 @@
1// Package token defines constants representing the lexical tokens for HCL
2// (HashiCorp Configuration Language)
3package token
4
5import (
6 "fmt"
7 "strconv"
8 "strings"
9
10 hclstrconv "github.com/hashicorp/hcl/hcl/strconv"
11)
12
13// Token defines a single HCL token which can be obtained via the Scanner
14type Token struct {
15 Type Type
16 Pos Pos
17 Text string
18 JSON bool
19}
20
21// Type is the set of lexical tokens of the HCL (HashiCorp Configuration Language)
22type Type int
23
24const (
25 // Special tokens
26 ILLEGAL Type = iota
27 EOF
28 COMMENT
29
30 identifier_beg
31 IDENT // literals
32 literal_beg
33 NUMBER // 12345
34 FLOAT // 123.45
35 BOOL // true,false
36 STRING // "abc"
37 HEREDOC // <<FOO\nbar\nFOO
38 literal_end
39 identifier_end
40
41 operator_beg
42 LBRACK // [
43 LBRACE // {
44 COMMA // ,
45 PERIOD // .
46
47 RBRACK // ]
48 RBRACE // }
49
50 ASSIGN // =
51 ADD // +
52 SUB // -
53 operator_end
54)
55
56var tokens = [...]string{
57 ILLEGAL: "ILLEGAL",
58
59 EOF: "EOF",
60 COMMENT: "COMMENT",
61
62 IDENT: "IDENT",
63 NUMBER: "NUMBER",
64 FLOAT: "FLOAT",
65 BOOL: "BOOL",
66 STRING: "STRING",
67
68 LBRACK: "LBRACK",
69 LBRACE: "LBRACE",
70 COMMA: "COMMA",
71 PERIOD: "PERIOD",
72 HEREDOC: "HEREDOC",
73
74 RBRACK: "RBRACK",
75 RBRACE: "RBRACE",
76
77 ASSIGN: "ASSIGN",
78 ADD: "ADD",
79 SUB: "SUB",
80}
81
82// String returns the string corresponding to the token type t.
83func (t Type) String() string {
84 s := ""
85 if 0 <= t && t < Type(len(tokens)) {
86 s = tokens[t]
87 }
88 if s == "" {
89 s = "token(" + strconv.Itoa(int(t)) + ")"
90 }
91 return s
92}
93
94// IsIdentifier returns true for tokens corresponding to identifiers and basic
95// type literals; it returns false otherwise.
96func (t Type) IsIdentifier() bool { return identifier_beg < t && t < identifier_end }
97
98// IsLiteral returns true for tokens corresponding to basic type literals; it
99// returns false otherwise.
100func (t Type) IsLiteral() bool { return literal_beg < t && t < literal_end }
101
102// IsOperator returns true for tokens corresponding to operators and
103// delimiters; it returns false otherwise.
104func (t Type) IsOperator() bool { return operator_beg < t && t < operator_end }
105
106// String returns a human-readable representation of the token,
107// including its position, type, and literal text (the text is only
108// meaningful for types such as token.IDENT and token.STRING).
109func (t Token) String() string {
110 return fmt.Sprintf("%s %s %s", t.Pos.String(), t.Type.String(), t.Text)
111}
112
113// Value returns the properly typed value for this token. The type of
114// the returned interface{} is guaranteed based on the Type field.
115//
116// This can only be called for literal types. If it is called for any other
117// type, this will panic.
118func (t Token) Value() interface{} {
119 switch t.Type {
120 case BOOL:
121 if t.Text == "true" {
122 return true
123 } else if t.Text == "false" {
124 return false
125 }
126
127 panic("unknown bool value: " + t.Text)
128 case FLOAT:
129 v, err := strconv.ParseFloat(t.Text, 64)
130 if err != nil {
131 panic(err)
132 }
133
134 return float64(v)
135 case NUMBER:
136 v, err := strconv.ParseInt(t.Text, 0, 64)
137 if err != nil {
138 panic(err)
139 }
140
141 return int64(v)
142 case IDENT:
143 return t.Text
144 case HEREDOC:
145 return unindentHeredoc(t.Text)
146 case STRING:
147 // Determine the Unquote method to use. If it came from JSON,
148 // then we need to use the built-in unquote since we have to
149 // escape interpolations there.
150 f := hclstrconv.Unquote
151 if t.JSON {
152 f = strconv.Unquote
153 }
154
155 // This case occurs if json null is used
156 if t.Text == "" {
157 return ""
158 }
159
160 v, err := f(t.Text)
161 if err != nil {
162 panic(fmt.Sprintf("unquote %s err: %s", t.Text, err))
163 }
164
165 return v
166 default:
167 panic(fmt.Sprintf("unimplemented Value for type: %s", t.Type))
168 }
169}
170
171// unindentHeredoc returns the string content of a heredoc. For a heredoc
172// started with <<, the content is returned as-is; for one started with <<-,
173// the hanging indent is removed if every line is at least as indented as the terminating line.
174func unindentHeredoc(heredoc string) string {
175 // We need to find the end of the marker
176 idx := strings.IndexByte(heredoc, '\n')
177 if idx == -1 {
178 panic("heredoc doesn't contain newline")
179 }
180
181 unindent := heredoc[2] == '-'
182
183 // We can optimize if the heredoc isn't marked for indentation
184 if !unindent {
185 return string(heredoc[idx+1 : len(heredoc)-idx+1])
186 }
187
188 // We need to unindent each line based on the indentation level of the marker
189 lines := strings.Split(string(heredoc[idx+1:len(heredoc)-idx+2]), "\n")
190 whitespacePrefix := lines[len(lines)-1]
191
192 isIndented := true
193 for _, v := range lines {
194 if strings.HasPrefix(v, whitespacePrefix) {
195 continue
196 }
197
198 isIndented = false
199 break
200 }
201
202 // If not all lines are at least as indented as the terminating mark, return
203 // the heredoc as is, but trim the leading space from the marker on the final line.
204 if !isIndented {
205 return strings.TrimRight(string(heredoc[idx+1:len(heredoc)-idx+1]), " \t")
206 }
207
208 unindentedLines := make([]string, len(lines))
209 for k, v := range lines {
210 if k == len(lines)-1 {
211 unindentedLines[k] = ""
212 break
213 }
214
215 unindentedLines[k] = strings.TrimPrefix(v, whitespacePrefix)
216 }
217
218 return strings.Join(unindentedLines, "\n")
219}
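
Value's contract above (literal types only; anything else panics) in a minimal sketch, using tokens constructed by hand rather than produced by a real scan:

package main

import (
	"fmt"

	"github.com/hashicorp/hcl/hcl/token"
)

func main() {
	for _, t := range []token.Token{
		{Type: token.NUMBER, Text: "42"},   // -> int64(42)
		{Type: token.FLOAT, Text: "1.5"},   // -> float64(1.5)
		{Type: token.BOOL, Text: "true"},   // -> true
		{Type: token.STRING, Text: `"hi"`}, // -> "hi" via hclstrconv.Unquote
	} {
		fmt.Printf("%-6s -> %#v\n", t.Type, t.Value())
	}
}
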
diff --git a/vendor/github.com/hashicorp/hcl/json/parser/flatten.go b/vendor/github.com/hashicorp/hcl/json/parser/flatten.go
new file mode 100644
index 0000000..f652d6f
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/json/parser/flatten.go
@@ -0,0 +1,117 @@
1package parser
2
3import "github.com/hashicorp/hcl/hcl/ast"
4
5// flattenObjects takes an AST node, walks it, and flattens nested objects and lists
6func flattenObjects(node ast.Node) {
7 ast.Walk(node, func(n ast.Node) (ast.Node, bool) {
8 // We only care about lists, because this is what we modify
9 list, ok := n.(*ast.ObjectList)
10 if !ok {
11 return n, true
12 }
13
14 // Rebuild the item list
15 items := make([]*ast.ObjectItem, 0, len(list.Items))
16 frontier := make([]*ast.ObjectItem, len(list.Items))
17 copy(frontier, list.Items)
18 for len(frontier) > 0 {
19 // Pop the current item
20 n := len(frontier)
21 item := frontier[n-1]
22 frontier = frontier[:n-1]
23
24 switch v := item.Val.(type) {
25 case *ast.ObjectType:
26 items, frontier = flattenObjectType(v, item, items, frontier)
27 case *ast.ListType:
28 items, frontier = flattenListType(v, item, items, frontier)
29 default:
30 items = append(items, item)
31 }
32 }
33
34 // Reverse the list since the frontier model runs things backwards
35 for i := len(items)/2 - 1; i >= 0; i-- {
36 opp := len(items) - 1 - i
37 items[i], items[opp] = items[opp], items[i]
38 }
39
40 // Done! Set the original items
41 list.Items = items
42 return n, true
43 })
44}
45
46func flattenListType(
47 ot *ast.ListType,
48 item *ast.ObjectItem,
49 items []*ast.ObjectItem,
50 frontier []*ast.ObjectItem) ([]*ast.ObjectItem, []*ast.ObjectItem) {
51 // If the list is empty, keep the original list
52 if len(ot.List) == 0 {
53 items = append(items, item)
54 return items, frontier
55 }
56
57 // All the elements of this list must also be objects!
58 for _, subitem := range ot.List {
59 if _, ok := subitem.(*ast.ObjectType); !ok {
60 items = append(items, item)
61 return items, frontier
62 }
63 }
64
65 // Great! We have a match; go through all the items and flatten them
66 for _, elem := range ot.List {
67 // Add it to the frontier so that we can recurse
68 frontier = append(frontier, &ast.ObjectItem{
69 Keys: item.Keys,
70 Assign: item.Assign,
71 Val: elem,
72 LeadComment: item.LeadComment,
73 LineComment: item.LineComment,
74 })
75 }
76
77 return items, frontier
78}
79
80func flattenObjectType(
81 ot *ast.ObjectType,
82 item *ast.ObjectItem,
83 items []*ast.ObjectItem,
84 frontier []*ast.ObjectItem) ([]*ast.ObjectItem, []*ast.ObjectItem) {
85 // If the list has no items we do not have to flatten anything
86 if ot.List.Items == nil {
87 items = append(items, item)
88 return items, frontier
89 }
90
91 // All the elements of this object must also be objects!
92 for _, subitem := range ot.List.Items {
93 if _, ok := subitem.Val.(*ast.ObjectType); !ok {
94 items = append(items, item)
95 return items, frontier
96 }
97 }
98
99 // Great! We have a match; go through all the items and flatten them
100 for _, subitem := range ot.List.Items {
101 // Copy the new key
102 keys := make([]*ast.ObjectKey, len(item.Keys)+len(subitem.Keys))
103 copy(keys, item.Keys)
104 copy(keys[len(item.Keys):], subitem.Keys)
105
106 // Add it to the frontier so that we can recurse
107 frontier = append(frontier, &ast.ObjectItem{
108 Keys: keys,
109 Assign: item.Assign,
110 Val: subitem.Val,
111 LeadComment: item.LeadComment,
112 LineComment: item.LineComment,
113 })
114 }
115
116 return items, frontier
117}
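
The effect of the flattening pass is easiest to see end to end: a chain of nested single-key JSON objects collapses into one ObjectItem whose key list spells out the path. A sketch, assuming the usual HCL ast shapes (ObjectList.Items, ObjectItem.Keys, ObjectKey.Token), which are not shown in this diff:

package main

import (
	"fmt"

	"github.com/hashicorp/hcl/hcl/ast"
	"github.com/hashicorp/hcl/json/parser"
)

func main() {
	src := []byte(`{"resource": {"aws_instance": {"web": {"ami": "abc"}}}}`)

	f, err := parser.Parse(src) // calls flattenObjects internally
	if err != nil {
		panic(err)
	}

	list := f.Node.(*ast.ObjectList)
	for _, item := range list.Items {
		for _, k := range item.Keys {
			fmt.Print(k.Token.Text, " ")
		}
		fmt.Println()
	}
	// Prints a single flattened key path: "resource" "aws_instance" "web"
}
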
diff --git a/vendor/github.com/hashicorp/hcl/json/parser/parser.go b/vendor/github.com/hashicorp/hcl/json/parser/parser.go
new file mode 100644
index 0000000..125a5f0
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/json/parser/parser.go
@@ -0,0 +1,313 @@
1package parser
2
3import (
4 "errors"
5 "fmt"
6
7 "github.com/hashicorp/hcl/hcl/ast"
8 hcltoken "github.com/hashicorp/hcl/hcl/token"
9 "github.com/hashicorp/hcl/json/scanner"
10 "github.com/hashicorp/hcl/json/token"
11)
12
13type Parser struct {
14 sc *scanner.Scanner
15
16 // Last read token
17 tok token.Token
18 commaPrev token.Token
19
20 enableTrace bool
21 indent int
22 n int // buffer size (max = 1)
23}
24
25func newParser(src []byte) *Parser {
26 return &Parser{
27 sc: scanner.New(src),
28 }
29}
30
31// Parse parses the given source and returns the abstract syntax tree.
32func Parse(src []byte) (*ast.File, error) {
33 p := newParser(src)
34 return p.Parse()
35}
36
37var errEofToken = errors.New("EOF token found")
38
39// Parse parses the parser's source and returns the abstract syntax tree.
40func (p *Parser) Parse() (*ast.File, error) {
41 f := &ast.File{}
42 var err, scerr error
43 p.sc.Error = func(pos token.Pos, msg string) {
44 scerr = fmt.Errorf("%s: %s", pos, msg)
45 }
46
47 // The root must be an object in JSON
48 object, err := p.object()
49 if scerr != nil {
50 return nil, scerr
51 }
52 if err != nil {
53 return nil, err
54 }
55
56 // We make our final node an object list so it is more HCL compatible
57 f.Node = object.List
58
59 // Flatten it, which finds patterns and turns them into more HCL-like
60 // AST trees.
61 flattenObjects(f.Node)
62
63 return f, nil
64}
65
66func (p *Parser) objectList() (*ast.ObjectList, error) {
67 defer un(trace(p, "ParseObjectList"))
68 node := &ast.ObjectList{}
69
70 for {
71 n, err := p.objectItem()
72 if err == errEofToken {
73 break // we are finished
74 }
75
76 // we don't return a nil node, because we might want to use the already
77 // collected items.
78 if err != nil {
79 return node, err
80 }
81
82 node.Add(n)
83
84 // Check for a followup comma. If it isn't a comma, then we're done
85 if tok := p.scan(); tok.Type != token.COMMA {
86 break
87 }
88 }
89
90 return node, nil
91}
92
93// objectItem parses a single object item
94func (p *Parser) objectItem() (*ast.ObjectItem, error) {
95 defer un(trace(p, "ParseObjectItem"))
96
97 keys, err := p.objectKey()
98 if err != nil {
99 return nil, err
100 }
101
102 o := &ast.ObjectItem{
103 Keys: keys,
104 }
105
106 switch p.tok.Type {
107 case token.COLON:
108 pos := p.tok.Pos
109 o.Assign = hcltoken.Pos{
110 Filename: pos.Filename,
111 Offset: pos.Offset,
112 Line: pos.Line,
113 Column: pos.Column,
114 }
115
116 o.Val, err = p.objectValue()
117 if err != nil {
118 return nil, err
119 }
120 }
121
122 return o, nil
123}
124
125// objectKey parses an object key and returns an ObjectKey AST
126func (p *Parser) objectKey() ([]*ast.ObjectKey, error) {
127 keyCount := 0
128 keys := make([]*ast.ObjectKey, 0)
129
130 for {
131 tok := p.scan()
132 switch tok.Type {
133 case token.EOF:
134 return nil, errEofToken
135 case token.STRING:
136 keyCount++
137 keys = append(keys, &ast.ObjectKey{
138 Token: p.tok.HCLToken(),
139 })
140 case token.COLON:
141 // If we have a zero keycount it means that we never got
142 // an object key, i.e. `{ :`. This is a syntax error.
143 if keyCount == 0 {
144 return nil, fmt.Errorf("expected: STRING got: %s", p.tok.Type)
145 }
146
147 // Done
148 return keys, nil
149 case token.ILLEGAL:
150 return nil, errors.New("illegal")
151 default:
152 return nil, fmt.Errorf("expected: STRING got: %s", p.tok.Type)
153 }
154 }
155}
156
157// objectValue parses any type of value, such as a number, bool, string,
158// object, or list.
159func (p *Parser) objectValue() (ast.Node, error) {
160 defer un(trace(p, "ParseObjectValue"))
161 tok := p.scan()
162
163 switch tok.Type {
164 case token.NUMBER, token.FLOAT, token.BOOL, token.NULL, token.STRING:
165 return p.literalType()
166 case token.LBRACE:
167 return p.objectType()
168 case token.LBRACK:
169 return p.listType()
170 case token.EOF:
171 return nil, errEofToken
172 }
173
174 return nil, fmt.Errorf("Expected object value, got unknown token: %+v", tok)
175}
176
177// object parses the root JSON object; anything other than an object
178// (or EOF) at the top level is an error.
179func (p *Parser) object() (*ast.ObjectType, error) {
180 defer un(trace(p, "ParseType"))
181 tok := p.scan()
182
183 switch tok.Type {
184 case token.LBRACE:
185 return p.objectType()
186 case token.EOF:
187 return nil, errEofToken
188 }
189
190 return nil, fmt.Errorf("Expected object, got unknown token: %+v", tok)
191}
192
193// objectType parses an object type and returns an ObjectType AST
194func (p *Parser) objectType() (*ast.ObjectType, error) {
195 defer un(trace(p, "ParseObjectType"))
196
197 // we assume that the currently scanned token is a LBRACE
198 o := &ast.ObjectType{}
199
200 l, err := p.objectList()
201
202 // if we hit RBRACE, we are good to go (it means we parsed all items); if it's
203 // not an RBRACE, it's a syntax error and we just return it.
204 if err != nil && p.tok.Type != token.RBRACE {
205 return nil, err
206 }
207
208 o.List = l
209 return o, nil
210}
211
212// listType parses a list type and returns a ListType AST
213func (p *Parser) listType() (*ast.ListType, error) {
214 defer un(trace(p, "ParseListType"))
215
216 // we assume that the currently scanned token is a LBRACK
217 l := &ast.ListType{}
218
219 for {
220 tok := p.scan()
221 switch tok.Type {
222 case token.NUMBER, token.FLOAT, token.STRING:
223 node, err := p.literalType()
224 if err != nil {
225 return nil, err
226 }
227
228 l.Add(node)
229 case token.COMMA:
230 continue
231 case token.LBRACE:
232 node, err := p.objectType()
233 if err != nil {
234 return nil, err
235 }
236
237 l.Add(node)
238 case token.BOOL:
239 // TODO(arslan) should we support? not supported by HCL yet
240 case token.LBRACK:
241 // TODO(arslan) should we support nested lists? Even though it's
242 // written in README of HCL, it's not a part of the grammar
243 // (not defined in parse.y)
244 case token.RBRACK:
245 // finished
246 return l, nil
247 default:
248 return nil, fmt.Errorf("unexpected token while parsing list: %s", tok.Type)
249 }
250
251 }
252}
253
254// literalType parses a literal type and returns a LiteralType AST
255func (p *Parser) literalType() (*ast.LiteralType, error) {
256 defer un(trace(p, "ParseLiteral"))
257
258 return &ast.LiteralType{
259 Token: p.tok.HCLToken(),
260 }, nil
261}
262
263// scan returns the next token from the underlying scanner. If a token has
264// been unscanned then read that instead.
265func (p *Parser) scan() token.Token {
266 // If we have a token on the buffer, then return it.
267 if p.n != 0 {
268 p.n = 0
269 return p.tok
270 }
271
272 p.tok = p.sc.Scan()
273 return p.tok
274}
275
276// unscan pushes the previously read token back onto the buffer.
277func (p *Parser) unscan() {
278 p.n = 1
279}
280
281// ----------------------------------------------------------------------------
282// Parsing support
283
284func (p *Parser) printTrace(a ...interface{}) {
285 if !p.enableTrace {
286 return
287 }
288
289 const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . "
290 const n = len(dots)
291 fmt.Printf("%5d:%3d: ", p.tok.Pos.Line, p.tok.Pos.Column)
292
293 i := 2 * p.indent
294 for i > n {
295 fmt.Print(dots)
296 i -= n
297 }
298 // i <= n
299 fmt.Print(dots[0:i])
300 fmt.Println(a...)
301}
302
303func trace(p *Parser, msg string) *Parser {
304 p.printTrace(msg, "(")
305 p.indent++
306 return p
307}
308
309// Usage pattern: defer un(trace(p, "..."))
310func un(p *Parser) {
311 p.indent--
312 p.printTrace(")")
313}
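
A minimal sketch of the exported entry point above, including a malformed input that surfaces as a parse error; the input literals are hypothetical examples.

package main

import (
	"fmt"

	"github.com/hashicorp/hcl/json/parser"
)

func main() {
	if f, err := parser.Parse([]byte(`{"name": "demo", "count": 3}`)); err == nil {
		fmt.Printf("root node: %T\n", f.Node) // *ast.ObjectList
	}

	// A missing value after the colon surfaces as an error, not a panic.
	if _, err := parser.Parse([]byte(`{"name": }`)); err != nil {
		fmt.Println("parse error:", err)
	}
}
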
diff --git a/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go b/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go
new file mode 100644
index 0000000..dd5c72b
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go
@@ -0,0 +1,451 @@
1package scanner
2
3import (
4 "bytes"
5 "fmt"
6 "os"
7 "unicode"
8 "unicode/utf8"
9
10 "github.com/hashicorp/hcl/json/token"
11)
12
13// eof represents a marker rune for the end of the reader.
14const eof = rune(0)
15
16// Scanner defines a lexical scanner
17type Scanner struct {
18 buf *bytes.Buffer // Source buffer for advancing and scanning
19 src []byte // Source buffer for immutable access
20
21 // Source Position
22 srcPos token.Pos // current position
23 prevPos token.Pos // previous position, used for peek() method
24
25 lastCharLen int // length of last character in bytes
26 lastLineLen int // length of last line in characters (for correct column reporting)
27
28 tokStart int // token text start position
29 tokEnd int // token text end position
30
31 // Error is called for each error encountered. If no Error
32 // function is set, the error is reported to os.Stderr.
33 Error func(pos token.Pos, msg string)
34
35 // ErrorCount is incremented by one for each error encountered.
36 ErrorCount int
37
38 // tokPos is the start position of most recently scanned token; set by
39 // Scan. The Filename field is always left untouched by the Scanner. If
40 // an error is reported (via Error) and Position is invalid, the scanner is
41 // not inside a token.
42 tokPos token.Pos
43}
44
45// New creates and initializes a new instance of Scanner using src as
46// its source content.
47func New(src []byte) *Scanner {
48 // even though we accept a src, we read from an io.Reader compatible type
49 // (*bytes.Buffer). So in the future we might easily change it to streaming
50 // read.
51 b := bytes.NewBuffer(src)
52 s := &Scanner{
53 buf: b,
54 src: src,
55 }
56
57 // srcPosition always starts with 1
58 s.srcPos.Line = 1
59 return s
60}
61
62// next reads the next rune from the buffered reader. Returns rune(0) if
63// an error occurs (or io.EOF is returned).
64func (s *Scanner) next() rune {
65 ch, size, err := s.buf.ReadRune()
66 if err != nil {
67 // advance for error reporting
68 s.srcPos.Column++
69 s.srcPos.Offset += size
70 s.lastCharLen = size
71 return eof
72 }
73
74 if ch == utf8.RuneError && size == 1 {
75 s.srcPos.Column++
76 s.srcPos.Offset += size
77 s.lastCharLen = size
78 s.err("illegal UTF-8 encoding")
79 return ch
80 }
81
82 // remember last position
83 s.prevPos = s.srcPos
84
85 s.srcPos.Column++
86 s.lastCharLen = size
87 s.srcPos.Offset += size
88
89 if ch == '\n' {
90 s.srcPos.Line++
91 s.lastLineLen = s.srcPos.Column
92 s.srcPos.Column = 0
93 }
94
95 // debug
96 // fmt.Printf("ch: %q, offset:column: %d:%d\n", ch, s.srcPos.Offset, s.srcPos.Column)
97 return ch
98}
99
100// unread unreads the previously read rune and updates the source position
101func (s *Scanner) unread() {
102 if err := s.buf.UnreadRune(); err != nil {
103 panic(err) // this is user fault, we should catch it
104 }
105 s.srcPos = s.prevPos // put back last position
106}
107
108// peek returns the next rune without advancing the reader.
109func (s *Scanner) peek() rune {
110 peek, _, err := s.buf.ReadRune()
111 if err != nil {
112 return eof
113 }
114
115 s.buf.UnreadRune()
116 return peek
117}
118
119// Scan scans the next token and returns the token.
120func (s *Scanner) Scan() token.Token {
121 ch := s.next()
122
123 // skip white space
124 for isWhitespace(ch) {
125 ch = s.next()
126 }
127
128 var tok token.Type
129
130 // token text markings
131 s.tokStart = s.srcPos.Offset - s.lastCharLen
132
133 // token position: the initial next() moves the offset by one (the size
134 // of the rune, actually), though we are interested in the starting point
135 s.tokPos.Offset = s.srcPos.Offset - s.lastCharLen
136 if s.srcPos.Column > 0 {
137 // common case: last character was not a '\n'
138 s.tokPos.Line = s.srcPos.Line
139 s.tokPos.Column = s.srcPos.Column
140 } else {
141 // last character was a '\n'
142 // (we cannot be at the beginning of the source
143 // since we have called next() at least once)
144 s.tokPos.Line = s.srcPos.Line - 1
145 s.tokPos.Column = s.lastLineLen
146 }
147
148 switch {
149 case isLetter(ch):
150 lit := s.scanIdentifier()
151 if lit == "true" || lit == "false" {
152 tok = token.BOOL
153 } else if lit == "null" {
154 tok = token.NULL
155 } else {
156 s.err("illegal char")
157 }
158 case isDecimal(ch):
159 tok = s.scanNumber(ch)
160 default:
161 switch ch {
162 case eof:
163 tok = token.EOF
164 case '"':
165 tok = token.STRING
166 s.scanString()
167 case '.':
168 tok = token.PERIOD
169 ch = s.peek()
170 if isDecimal(ch) {
171 tok = token.FLOAT
172 ch = s.scanMantissa(ch)
173 ch = s.scanExponent(ch)
174 }
175 case '[':
176 tok = token.LBRACK
177 case ']':
178 tok = token.RBRACK
179 case '{':
180 tok = token.LBRACE
181 case '}':
182 tok = token.RBRACE
183 case ',':
184 tok = token.COMMA
185 case ':':
186 tok = token.COLON
187 case '-':
188 if isDecimal(s.peek()) {
189 ch := s.next()
190 tok = s.scanNumber(ch)
191 } else {
192 s.err("illegal char")
193 }
194 default:
195 s.err("illegal char: " + string(ch))
196 }
197 }
198
199 // finish token ending
200 s.tokEnd = s.srcPos.Offset
201
202 // create token literal
203 var tokenText string
204 if s.tokStart >= 0 {
205 tokenText = string(s.src[s.tokStart:s.tokEnd])
206 }
207 s.tokStart = s.tokEnd // ensure idempotency of tokenText() call
208
209 return token.Token{
210 Type: tok,
211 Pos: s.tokPos,
212 Text: tokenText,
213 }
214}
215
216// scanNumber scans a number definition starting with the given rune
217func (s *Scanner) scanNumber(ch rune) token.Type {
218 zero := ch == '0'
219 pos := s.srcPos
220
221 s.scanMantissa(ch)
222 ch = s.next() // seek forward
223 if ch == 'e' || ch == 'E' {
224 ch = s.scanExponent(ch)
225 return token.FLOAT
226 }
227
228 if ch == '.' {
229 ch = s.scanFraction(ch)
230 if ch == 'e' || ch == 'E' {
231 ch = s.next()
232 ch = s.scanExponent(ch)
233 }
234 return token.FLOAT
235 }
236
237 if ch != eof {
238 s.unread()
239 }
240
241 // If the number started with a zero and has more digits, error
242 if zero && pos != s.srcPos {
243 s.err("numbers cannot start with 0")
244 }
245
246 return token.NUMBER
247}
248
249// scanMantissa scans the mantissa beginning from the given rune. It returns
250// the next non-decimal rune. It's used to determine whether it's a fraction or an exponent.
251func (s *Scanner) scanMantissa(ch rune) rune {
252 scanned := false
253 for isDecimal(ch) {
254 ch = s.next()
255 scanned = true
256 }
257
258 if scanned && ch != eof {
259 s.unread()
260 }
261 return ch
262}
263
264// scanFraction scans the fraction after the '.' rune
265func (s *Scanner) scanFraction(ch rune) rune {
266 if ch == '.' {
267 ch = s.peek() // we peek just to see if we can move forward
268 ch = s.scanMantissa(ch)
269 }
270 return ch
271}
272
273// scanExponent scans the remaining parts of an exponent after the 'e' or 'E'
274// rune.
275func (s *Scanner) scanExponent(ch rune) rune {
276 if ch == 'e' || ch == 'E' {
277 ch = s.next()
278 if ch == '-' || ch == '+' {
279 ch = s.next()
280 }
281 ch = s.scanMantissa(ch)
282 }
283 return ch
284}
285
286// scanString scans a quoted string
287func (s *Scanner) scanString() {
288 braces := 0
289 for {
290 // '"' opening already consumed
291 // read character after quote
292 ch := s.next()
293
294 if ch == '\n' || ch < 0 || ch == eof {
295 s.err("literal not terminated")
296 return
297 }
298
299 if ch == '"' {
300 break
301 }
302
303 // If we're going into a ${} then we can ignore quotes for a while
304 if braces == 0 && ch == '$' && s.peek() == '{' {
305 braces++
306 s.next()
307 } else if braces > 0 && ch == '{' {
308 braces++
309 }
310 if braces > 0 && ch == '}' {
311 braces--
312 }
313
314 if ch == '\\' {
315 s.scanEscape()
316 }
317 }
318
319 return
320}
321
322// scanEscape scans an escape sequence
323func (s *Scanner) scanEscape() rune {
324 // http://en.cppreference.com/w/cpp/language/escape
325 ch := s.next() // read character after '\\'
326 switch ch {
327 case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '"':
328 // nothing to do
329 case '0', '1', '2', '3', '4', '5', '6', '7':
330 // octal notation
331 ch = s.scanDigits(ch, 8, 3)
332 case 'x':
334 // hexadecimal notation
334 ch = s.scanDigits(s.next(), 16, 2)
335 case 'u':
336 // universal character name
337 ch = s.scanDigits(s.next(), 16, 4)
338 case 'U':
339 // universal character name
340 ch = s.scanDigits(s.next(), 16, 8)
341 default:
342 s.err("illegal char escape")
343 }
344 return ch
345}
346
347// scanDigits scans up to n digits of the given base. For example, an
348// octal escape such as \123 results in the call scanDigits(ch, 8, 3)
349func (s *Scanner) scanDigits(ch rune, base, n int) rune {
350 for n > 0 && digitVal(ch) < base {
351 ch = s.next()
352 n--
353 }
354 if n > 0 {
355 s.err("illegal char escape")
356 }
357
358 // we scanned all digits, put the last non-digit char back
359 s.unread()
360 return ch
361}
362
363// scanIdentifier scans an identifier and returns the literal string
364func (s *Scanner) scanIdentifier() string {
365 offs := s.srcPos.Offset - s.lastCharLen
366 ch := s.next()
367 for isLetter(ch) || isDigit(ch) || ch == '-' {
368 ch = s.next()
369 }
370
371 if ch != eof {
372 s.unread() // we got identifier, put back latest char
373 }
374
375 return string(s.src[offs:s.srcPos.Offset])
376}
377
378// recentPosition returns the position of the character immediately after the
379// character or token returned by the last call to Scan.
380func (s *Scanner) recentPosition() (pos token.Pos) {
381 pos.Offset = s.srcPos.Offset - s.lastCharLen
382 switch {
383 case s.srcPos.Column > 0:
384 // common case: last character was not a '\n'
385 pos.Line = s.srcPos.Line
386 pos.Column = s.srcPos.Column
387 case s.lastLineLen > 0:
388 // last character was a '\n'
389 // (we cannot be at the beginning of the source
390 // since we have called next() at least once)
391 pos.Line = s.srcPos.Line - 1
392 pos.Column = s.lastLineLen
393 default:
394 // at the beginning of the source
395 pos.Line = 1
396 pos.Column = 1
397 }
398 return
399}
400
401// err reports a scanning error to the s.Error function. If no Error
402// function is set, the error is printed to os.Stderr
403func (s *Scanner) err(msg string) {
404 s.ErrorCount++
405 pos := s.recentPosition()
406
407 if s.Error != nil {
408 s.Error(pos, msg)
409 return
410 }
411
412 fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg)
413}
414
415// isLetter returns true if the given rune is a letter
416func isLetter(ch rune) bool {
417 return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch)
418}
419
420// isDigit returns true if the given rune is a decimal digit
421func isDigit(ch rune) bool {
422 return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch)
423}
424
425// isDecimal returns true if the given rune is a decimal digit
426func isDecimal(ch rune) bool {
427 return '0' <= ch && ch <= '9'
428}
429
430// isHexadecimal returns true if the given rune is a hexadecimal digit
431func isHexadecimal(ch rune) bool {
432 return '0' <= ch && ch <= '9' || 'a' <= ch && ch <= 'f' || 'A' <= ch && ch <= 'F'
433}
434
435// isWhitespace returns true if the rune is a space, tab, newline or carriage return
436func isWhitespace(ch rune) bool {
437 return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r'
438}
439
440// digitVal returns the integer value of a given octal, decimal, or hexadecimal rune
441func digitVal(ch rune) int {
442 switch {
443 case '0' <= ch && ch <= '9':
444 return int(ch - '0')
445 case 'a' <= ch && ch <= 'f':
446 return int(ch - 'a' + 10)
447 case 'A' <= ch && ch <= 'F':
448 return int(ch - 'A' + 10)
449 }
450 return 16 // larger than any legal digit val
451}
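
A usage sketch mirroring the HCL scanner example earlier in this diff; note the JSON-specific tokens (COLON, NULL) and that scanNumber rejects leading zeros. The input literal is a hypothetical example.

package main

import (
	"fmt"

	"github.com/hashicorp/hcl/json/scanner"
	"github.com/hashicorp/hcl/json/token"
)

func main() {
	s := scanner.New([]byte(`{"pi": 3.14, "n": -7, "x": null}`))
	for {
		tok := s.Scan()
		if tok.Type == token.EOF {
			break
		}
		fmt.Printf("%-6s %q\n", tok.Type, tok.Text)
	}
}
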
diff --git a/vendor/github.com/hashicorp/hcl/json/token/position.go b/vendor/github.com/hashicorp/hcl/json/token/position.go
new file mode 100644
index 0000000..59c1bb7
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/json/token/position.go
@@ -0,0 +1,46 @@
1package token
2
3import "fmt"
4
5// Pos describes an arbitrary source position
6// including the file, line, and column location.
7// A Position is valid if the line number is > 0.
8type Pos struct {
9 Filename string // filename, if any
10 Offset int // offset, starting at 0
11 Line int // line number, starting at 1
12 Column int // column number, starting at 1 (character count)
13}
14
15// IsValid returns true if the position is valid.
16func (p *Pos) IsValid() bool { return p.Line > 0 }
17
18// String returns a string in one of several forms:
19//
20// file:line:column valid position with file name
21// line:column valid position without file name
22// file invalid position with file name
23// - invalid position without file name
24func (p Pos) String() string {
25 s := p.Filename
26 if p.IsValid() {
27 if s != "" {
28 s += ":"
29 }
30 s += fmt.Sprintf("%d:%d", p.Line, p.Column)
31 }
32 if s == "" {
33 s = "-"
34 }
35 return s
36}
37
38// Before reports whether the position p is before u.
39func (p Pos) Before(u Pos) bool {
40 return u.Offset > p.Offset || u.Line > p.Line
41}
42
43// After reports whether the position p is after u.
44func (p Pos) After(u Pos) bool {
45 return u.Offset < p.Offset || u.Line < p.Line
46}
diff --git a/vendor/github.com/hashicorp/hcl/json/token/token.go b/vendor/github.com/hashicorp/hcl/json/token/token.go
new file mode 100644
index 0000000..95a0c3e
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/json/token/token.go
@@ -0,0 +1,118 @@
1package token
2
3import (
4 "fmt"
5 "strconv"
6
7 hcltoken "github.com/hashicorp/hcl/hcl/token"
8)
9
10// Token defines a single HCL token which can be obtained via the Scanner
11type Token struct {
12 Type Type
13 Pos Pos
14 Text string
15}
16
17// Type is the set of lexical tokens of the HCL (HashiCorp Configuration Language)
18type Type int
19
20const (
21 // Special tokens
22 ILLEGAL Type = iota
23 EOF
24
25 identifier_beg
26 literal_beg
27 NUMBER // 12345
28 FLOAT // 123.45
29 BOOL // true,false
30 STRING // "abc"
31 NULL // null
32 literal_end
33 identifier_end
34
35 operator_beg
36 LBRACK // [
37 LBRACE // {
38 COMMA // ,
39 PERIOD // .
40 COLON // :
41
42 RBRACK // ]
43 RBRACE // }
44
45 operator_end
46)
47
48var tokens = [...]string{
49 ILLEGAL: "ILLEGAL",
50
51 EOF: "EOF",
52
53 NUMBER: "NUMBER",
54 FLOAT: "FLOAT",
55 BOOL: "BOOL",
56 STRING: "STRING",
57 NULL: "NULL",
58
59 LBRACK: "LBRACK",
60 LBRACE: "LBRACE",
61 COMMA: "COMMA",
62 PERIOD: "PERIOD",
63 COLON: "COLON",
64
65 RBRACK: "RBRACK",
66 RBRACE: "RBRACE",
67}
68
69// String returns the string corresponding to the token type t.
70func (t Type) String() string {
71 s := ""
72 if 0 <= t && t < Type(len(tokens)) {
73 s = tokens[t]
74 }
75 if s == "" {
76 s = "token(" + strconv.Itoa(int(t)) + ")"
77 }
78 return s
79}
80
81// IsIdentifier returns true for tokens corresponding to identifiers and basic
82// type literals; it returns false otherwise.
83func (t Type) IsIdentifier() bool { return identifier_beg < t && t < identifier_end }
84
85// IsLiteral returns true for tokens corresponding to basic type literals; it
86// returns false otherwise.
87func (t Type) IsLiteral() bool { return literal_beg < t && t < literal_end }
88
89// IsOperator returns true for tokens corresponding to operators and
90// delimiters; it returns false otherwise.
91func (t Type) IsOperator() bool { return operator_beg < t && t < operator_end }
92
93// String returns a human-readable representation of the token,
94// including its position, type, and literal text (the text is only
95// meaningful for literal types such as token.STRING).
96func (t Token) String() string {
97 return fmt.Sprintf("%s %s %s", t.Pos.String(), t.Type.String(), t.Text)
98}
99
100// HCLToken converts this token to an HCL token.
101//
102// The token type must be a literal type or this will panic.
103func (t Token) HCLToken() hcltoken.Token {
104 switch t.Type {
105 case BOOL:
106 return hcltoken.Token{Type: hcltoken.BOOL, Text: t.Text}
107 case FLOAT:
108 return hcltoken.Token{Type: hcltoken.FLOAT, Text: t.Text}
109 case NULL:
110 return hcltoken.Token{Type: hcltoken.STRING, Text: ""}
111 case NUMBER:
112 return hcltoken.Token{Type: hcltoken.NUMBER, Text: t.Text}
113 case STRING:
114 return hcltoken.Token{Type: hcltoken.STRING, Text: t.Text, JSON: true}
115 default:
116 panic(fmt.Sprintf("unimplemented HCLToken for type: %s", t.Type))
117 }
118}
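
A sketch of the conversion: the JSON flag set on the resulting STRING token is what later routes hcltoken.Token.Value() to the standard strconv.Unquote instead of the HCL-aware unquoter.

package main

import (
	"fmt"

	jsontoken "github.com/hashicorp/hcl/json/token"
)

func main() {
	jt := jsontoken.Token{Type: jsontoken.STRING, Text: `"us-east-1"`}

	ht := jt.HCLToken()
	fmt.Println(ht.Type, ht.JSON)   // STRING true
	fmt.Printf("%#v\n", ht.Value()) // "us-east-1", unquoted by strconv.Unquote
}
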
diff --git a/vendor/github.com/hashicorp/hcl/lex.go b/vendor/github.com/hashicorp/hcl/lex.go
new file mode 100644
index 0000000..d9993c2
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/lex.go
@@ -0,0 +1,38 @@
1package hcl
2
3import (
4 "unicode"
5 "unicode/utf8"
6)
7
8type lexModeValue byte
9
10const (
11 lexModeUnknown lexModeValue = iota
12 lexModeHcl
13 lexModeJson
14)
15
16// lexMode returns whether we're going to be parsing in JSON
17// mode or HCL mode.
18func lexMode(v []byte) lexModeValue {
19 var (
20 r rune
21 w int
22 offset int
23 )
24
25 for {
26 r, w = utf8.DecodeRune(v[offset:])
27 offset += w
28 if unicode.IsSpace(r) {
29 continue
30 }
31 if r == '{' {
32 return lexModeJson
33 }
34 break
35 }
36
37 return lexModeHcl
38}
diff --git a/vendor/github.com/hashicorp/hcl/parse.go b/vendor/github.com/hashicorp/hcl/parse.go
new file mode 100644
index 0000000..1fca53c
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/parse.go
@@ -0,0 +1,39 @@
1package hcl
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/hcl/hcl/ast"
7 hclParser "github.com/hashicorp/hcl/hcl/parser"
8 jsonParser "github.com/hashicorp/hcl/json/parser"
9)
10
11// ParseBytes accepts a byte slice as input and returns the AST tree.
12//
13// Input can be either JSON or HCL
14func ParseBytes(in []byte) (*ast.File, error) {
15 return parse(in)
16}
17
18// ParseString accepts a string as input and returns the AST tree.
19func ParseString(input string) (*ast.File, error) {
20 return parse([]byte(input))
21}
22
23func parse(in []byte) (*ast.File, error) {
24 switch lexMode(in) {
25 case lexModeHcl:
26 return hclParser.Parse(in)
27 case lexModeJson:
28 return jsonParser.Parse(in)
29 }
30
31 return nil, fmt.Errorf("unknown config format")
32}
33
34// Parse parses the given input and returns the root object.
35//
36// The input format can be either HCL or JSON.
37func Parse(input string) (*ast.File, error) {
38 return parse([]byte(input))
39}
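
Because lexMode sniffs the first non-space rune, the same entry point handles both syntaxes. A minimal sketch, assuming the import path is the repository root github.com/hashicorp/hcl:

package main

import (
	"fmt"

	"github.com/hashicorp/hcl"
)

func main() {
	for _, src := range []string{
		`name = "demo"`,    // HCL: first non-space rune is not '{'
		`{"name": "demo"}`, // JSON: detected by the leading '{'
	} {
		f, err := hcl.Parse(src)
		if err != nil {
			fmt.Println("parse error:", err)
			continue
		}
		fmt.Printf("parsed %T\n", f.Node) // *ast.ObjectList in both cases
	}
}
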
diff --git a/vendor/github.com/hashicorp/hil/LICENSE b/vendor/github.com/hashicorp/hil/LICENSE
new file mode 100644
index 0000000..82b4de9
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/LICENSE
@@ -0,0 +1,353 @@
1Mozilla Public License, version 2.0
2
31. Definitions
4
51.1. “Contributor”
6
7 means each individual or legal entity that creates, contributes to the
8 creation of, or owns Covered Software.
9
101.2. “Contributor Version”
11
12 means the combination of the Contributions of others (if any) used by a
13 Contributor and that particular Contributor’s Contribution.
14
151.3. “Contribution”
16
17 means Covered Software of a particular Contributor.
18
191.4. “Covered Software”
20
21 means Source Code Form to which the initial Contributor has attached the
22 notice in Exhibit A, the Executable Form of such Source Code Form, and
23 Modifications of such Source Code Form, in each case including portions
24 thereof.
25
261.5. “Incompatible With Secondary Licenses”
27 means
28
29 a. that the initial Contributor has attached the notice described in
30 Exhibit B to the Covered Software; or
31
32 b. that the Covered Software was made available under the terms of version
33 1.1 or earlier of the License, but not also under the terms of a
34 Secondary License.
35
361.6. “Executable Form”
37
38 means any form of the work other than Source Code Form.
39
401.7. “Larger Work”
41
42 means a work that combines Covered Software with other material, in a separate
43 file or files, that is not Covered Software.
44
451.8. “License”
46
47 means this document.
48
491.9. “Licensable”
50
51 means having the right to grant, to the maximum extent possible, whether at the
52 time of the initial grant or subsequently, any and all of the rights conveyed by
53 this License.
54
551.10. “Modifications”
56
57 means any of the following:
58
59 a. any file in Source Code Form that results from an addition to, deletion
60 from, or modification of the contents of Covered Software; or
61
62 b. any new file in Source Code Form that contains any Covered Software.
63
641.11. “Patent Claims” of a Contributor
65
66 means any patent claim(s), including without limitation, method, process,
67 and apparatus claims, in any patent Licensable by such Contributor that
68 would be infringed, but for the grant of the License, by the making,
69 using, selling, offering for sale, having made, import, or transfer of
70 either its Contributions or its Contributor Version.
71
721.12. “Secondary License”
73
74 means either the GNU General Public License, Version 2.0, the GNU Lesser
75 General Public License, Version 2.1, the GNU Affero General Public
76 License, Version 3.0, or any later versions of those licenses.
77
781.13. “Source Code Form”
79
80 means the form of the work preferred for making modifications.
81
821.14. “You” (or “Your”)
83
84 means an individual or a legal entity exercising rights under this
85 License. For legal entities, “You” includes any entity that controls, is
86 controlled by, or is under common control with You. For purposes of this
87 definition, “control” means (a) the power, direct or indirect, to cause
88 the direction or management of such entity, whether by contract or
89 otherwise, or (b) ownership of more than fifty percent (50%) of the
90 outstanding shares or beneficial ownership of such entity.
91
92
932. License Grants and Conditions
94
952.1. Grants
96
97 Each Contributor hereby grants You a world-wide, royalty-free,
98 non-exclusive license:
99
100 a. under intellectual property rights (other than patent or trademark)
101 Licensable by such Contributor to use, reproduce, make available,
102 modify, display, perform, distribute, and otherwise exploit its
103 Contributions, either on an unmodified basis, with Modifications, or as
104 part of a Larger Work; and
105
106 b. under Patent Claims of such Contributor to make, use, sell, offer for
107 sale, have made, import, and otherwise transfer either its Contributions
108 or its Contributor Version.
109
1102.2. Effective Date
111
112 The licenses granted in Section 2.1 with respect to any Contribution become
113 effective for each Contribution on the date the Contributor first distributes
114 such Contribution.
115
1162.3. Limitations on Grant Scope
117
118 The licenses granted in this Section 2 are the only rights granted under this
119 License. No additional rights or licenses will be implied from the distribution
120 or licensing of Covered Software under this License. Notwithstanding Section
121 2.1(b) above, no patent license is granted by a Contributor:
122
123 a. for any code that a Contributor has removed from Covered Software; or
124
125 b. for infringements caused by: (i) Your and any other third party’s
126 modifications of Covered Software, or (ii) the combination of its
127 Contributions with other software (except as part of its Contributor
128 Version); or
129
130 c. under Patent Claims infringed by Covered Software in the absence of its
131 Contributions.
132
133 This License does not grant any rights in the trademarks, service marks, or
134 logos of any Contributor (except as may be necessary to comply with the
135 notice requirements in Section 3.4).
136
1372.4. Subsequent Licenses
138
139 No Contributor makes additional grants as a result of Your choice to
140 distribute the Covered Software under a subsequent version of this License
141 (see Section 10.2) or under the terms of a Secondary License (if permitted
142 under the terms of Section 3.3).
143
1442.5. Representation
145
146 Each Contributor represents that the Contributor believes its Contributions
147 are its original creation(s) or it has sufficient rights to grant the
148 rights to its Contributions conveyed by this License.
149
1502.6. Fair Use
151
152 This License is not intended to limit any rights You have under applicable
153 copyright doctrines of fair use, fair dealing, or other equivalents.
154
1552.7. Conditions
156
157 Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
158 Section 2.1.
159
160
1613. Responsibilities
162
1633.1. Distribution of Source Form
164
165 All distribution of Covered Software in Source Code Form, including any
166 Modifications that You create or to which You contribute, must be under the
167 terms of this License. You must inform recipients that the Source Code Form
168 of the Covered Software is governed by the terms of this License, and how
169 they can obtain a copy of this License. You may not attempt to alter or
170 restrict the recipients’ rights in the Source Code Form.
171
1723.2. Distribution of Executable Form
173
174 If You distribute Covered Software in Executable Form then:
175
176 a. such Covered Software must also be made available in Source Code Form,
177 as described in Section 3.1, and You must inform recipients of the
178 Executable Form how they can obtain a copy of such Source Code Form by
179 reasonable means in a timely manner, at a charge no more than the cost
180 of distribution to the recipient; and
181
182 b. You may distribute such Executable Form under the terms of this License,
183 or sublicense it under different terms, provided that the license for
184 the Executable Form does not attempt to limit or alter the recipients’
185 rights in the Source Code Form under this License.
186
1873.3. Distribution of a Larger Work
188
189 You may create and distribute a Larger Work under terms of Your choice,
190 provided that You also comply with the requirements of this License for the
191 Covered Software. If the Larger Work is a combination of Covered Software
192 with a work governed by one or more Secondary Licenses, and the Covered
193 Software is not Incompatible With Secondary Licenses, this License permits
194 You to additionally distribute such Covered Software under the terms of
195 such Secondary License(s), so that the recipient of the Larger Work may, at
196 their option, further distribute the Covered Software under the terms of
197 either this License or such Secondary License(s).
198
1993.4. Notices
200
201 You may not remove or alter the substance of any license notices (including
202 copyright notices, patent notices, disclaimers of warranty, or limitations
203 of liability) contained within the Source Code Form of the Covered
204 Software, except that You may alter any license notices to the extent
205 required to remedy known factual inaccuracies.
206
2073.5. Application of Additional Terms
208
209 You may choose to offer, and to charge a fee for, warranty, support,
210 indemnity or liability obligations to one or more recipients of Covered
211 Software. However, You may do so only on Your own behalf, and not on behalf
212 of any Contributor. You must make it absolutely clear that any such
213 warranty, support, indemnity, or liability obligation is offered by You
214 alone, and You hereby agree to indemnify every Contributor for any
215 liability incurred by such Contributor as a result of warranty, support,
216 indemnity or liability terms You offer. You may include additional
217 disclaimers of warranty and limitations of liability specific to any
218 jurisdiction.
219
2204. Inability to Comply Due to Statute or Regulation
221
222 If it is impossible for You to comply with any of the terms of this License
223 with respect to some or all of the Covered Software due to statute, judicial
224 order, or regulation then You must: (a) comply with the terms of this License
225 to the maximum extent possible; and (b) describe the limitations and the code
226 they affect. Such description must be placed in a text file included with all
227 distributions of the Covered Software under this License. Except to the
228 extent prohibited by statute or regulation, such description must be
229 sufficiently detailed for a recipient of ordinary skill to be able to
230 understand it.
231
2325. Termination
233
2345.1. The rights granted under this License will terminate automatically if You
235 fail to comply with any of its terms. However, if You become compliant,
236 then the rights granted under this License from a particular Contributor
237 are reinstated (a) provisionally, unless and until such Contributor
238 explicitly and finally terminates Your grants, and (b) on an ongoing basis,
239 if such Contributor fails to notify You of the non-compliance by some
240 reasonable means prior to 60 days after You have come back into compliance.
241 Moreover, Your grants from a particular Contributor are reinstated on an
242 ongoing basis if such Contributor notifies You of the non-compliance by
243 some reasonable means, this is the first time You have received notice of
244 non-compliance with this License from such Contributor, and You become
245 compliant prior to 30 days after Your receipt of the notice.
246
2475.2. If You initiate litigation against any entity by asserting a patent
248 infringement claim (excluding declaratory judgment actions, counter-claims,
249 and cross-claims) alleging that a Contributor Version directly or
250 indirectly infringes any patent, then the rights granted to You by any and
251 all Contributors for the Covered Software under Section 2.1 of this License
252 shall terminate.
253
2545.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
255 license agreements (excluding distributors and resellers) which have been
256 validly granted by You or Your distributors under this License prior to
257 termination shall survive termination.
258
2596. Disclaimer of Warranty
260
261 Covered Software is provided under this License on an “as is” basis, without
262 warranty of any kind, either expressed, implied, or statutory, including,
263 without limitation, warranties that the Covered Software is free of defects,
264 merchantable, fit for a particular purpose or non-infringing. The entire
265 risk as to the quality and performance of the Covered Software is with You.
266 Should any Covered Software prove defective in any respect, You (not any
267 Contributor) assume the cost of any necessary servicing, repair, or
268 correction. This disclaimer of warranty constitutes an essential part of this
269 License. No use of any Covered Software is authorized under this License
270 except under this disclaimer.
271
2727. Limitation of Liability
273
274 Under no circumstances and under no legal theory, whether tort (including
275 negligence), contract, or otherwise, shall any Contributor, or anyone who
276 distributes Covered Software as permitted above, be liable to You for any
277 direct, indirect, special, incidental, or consequential damages of any
278 character including, without limitation, damages for lost profits, loss of
279 goodwill, work stoppage, computer failure or malfunction, or any and all
280 other commercial damages or losses, even if such party shall have been
281 informed of the possibility of such damages. This limitation of liability
282 shall not apply to liability for death or personal injury resulting from such
283 party’s negligence to the extent applicable law prohibits such limitation.
284 Some jurisdictions do not allow the exclusion or limitation of incidental or
285 consequential damages, so this exclusion and limitation may not apply to You.
286
2878. Litigation
288
289 Any litigation relating to this License may be brought only in the courts of
290 a jurisdiction where the defendant maintains its principal place of business
291 and such litigation shall be governed by laws of that jurisdiction, without
292 reference to its conflict-of-law provisions. Nothing in this Section shall
293 prevent a party’s ability to bring cross-claims or counter-claims.
294
2959. Miscellaneous
296
297 This License represents the complete agreement concerning the subject matter
298 hereof. If any provision of this License is held to be unenforceable, such
299 provision shall be reformed only to the extent necessary to make it
300 enforceable. Any law or regulation which provides that the language of a
301 contract shall be construed against the drafter shall not be used to construe
302 this License against a Contributor.
303
304
30510. Versions of the License
306
30710.1. New Versions
308
309 Mozilla Foundation is the license steward. Except as provided in Section
310 10.3, no one other than the license steward has the right to modify or
311 publish new versions of this License. Each version will be given a
312 distinguishing version number.
313
31410.2. Effect of New Versions
315
316 You may distribute the Covered Software under the terms of the version of
317 the License under which You originally received the Covered Software, or
318 under the terms of any subsequent version published by the license
319 steward.
320
32110.3. Modified Versions
322
323 If you create software not governed by this License, and you want to
324 create a new license for such software, you may create and use a modified
325 version of this License if you rename the license and remove any
326 references to the name of the license steward (except to note that such
327 modified license differs from this License).
328
32910.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
330 If You choose to distribute Source Code Form that is Incompatible With
331 Secondary Licenses under the terms of this version of the License, the
332 notice described in Exhibit B of this License must be attached.
333
334Exhibit A - Source Code Form License Notice
335
336 This Source Code Form is subject to the
337 terms of the Mozilla Public License, v.
338 2.0. If a copy of the MPL was not
339 distributed with this file, You can
340 obtain one at
341 http://mozilla.org/MPL/2.0/.
342
343If it is not possible or desirable to put the notice in a particular file, then
344You may include the notice in a location (such as a LICENSE file in a relevant
345directory) where a recipient would be likely to look for such a notice.
346
347You may add additional accurate notices of copyright ownership.
348
349Exhibit B - “Incompatible With Secondary Licenses” Notice
350
351 This Source Code Form is “Incompatible
352 With Secondary Licenses”, as defined by
353 the Mozilla Public License, v. 2.0.
diff --git a/vendor/github.com/hashicorp/hil/README.md b/vendor/github.com/hashicorp/hil/README.md
new file mode 100644
index 0000000..186ed25
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/README.md
@@ -0,0 +1,102 @@
1# HIL
2
3[![GoDoc](https://godoc.org/github.com/hashicorp/hil?status.png)](https://godoc.org/github.com/hashicorp/hil) [![Build Status](https://travis-ci.org/hashicorp/hil.svg?branch=master)](https://travis-ci.org/hashicorp/hil)
4
5HIL (HashiCorp Interpolation Language) is a lightweight embedded language used
6primarily for configuration interpolation. The goal of HIL is to make a simple
7language for interpolations in the various configurations of HashiCorp tools.
8
9HIL is built to interpolate any string, but is in use by HashiCorp primarily
10with [HCL](https://github.com/hashicorp/hcl). HCL is _not required_ in any
11way for use with HIL.
12
13HIL isn't meant to be a general-purpose language. It was built for basic
14configuration interpolations. Therefore, you can't currently write functions,
15have conditionals, set intermediary variables, etc. within HIL itself. It is
16possible some of these may be added later but the right use case must exist.
17
18## Why?
19
20Many of our tools have support for something similar to templates, but
21within the configuration itself. The most prominent requirement was in
22[Terraform](https://github.com/hashicorp/terraform) where we wanted the
23configuration to be able to reference values from elsewhere in the
24configuration. Example:
25
26 foo = "hi ${var.world}"
27
28We originally used a full templating language for this, but found it
29was too heavyweight. Additionally, many full languages required bindings
30to C (and thus the use of cgo), which we try to avoid to make cross-compilation
31easier. We then moved to very basic regular-expression-based
32string replacement, but the need for basic arithmetic and function
33calls resulted in overly complex regular expressions.
34
35Ultimately, we wrote our own mini-language within Terraform itself. As
36we built other projects such as [Nomad](https://nomadproject.io) and
37[Otto](https://ottoproject.io), the need for basic interpolations arose
38again.
39
40Thus HIL was born. It is extracted from Terraform, cleaned up, and
41better tested for general purpose use.
42
43## Syntax
44
45For a complete grammar, please see the parser itself. A high-level overview
46of the syntax and grammar is given here.
47
48Code begins within `${` and `}`. Outside of this, text is treated
49literally. For example, `foo` is a valid HIL program that is just the
50string "foo", but `foo ${bar}` is an HIL program that is the string "foo "
51concatenated with the value of `bar`. For the remainder of the syntax
52docs, we'll assume you're within `${}`.
53
54 * Identifiers are any text in the format of `[a-zA-Z0-9-.]`. Example
55 identifiers: `foo`, `var.foo`, `foo-bar`.
56
57 * Strings are double quoted and can contain any UTF-8 characters.
58 Example: `"Hello, World"`
59
60 * Numbers are assumed to be base 10. If you prefix a number with `0x`,
61   it is treated as hexadecimal. If it is prefixed with `0`, it is
62   treated as octal. Numbers can be in scientific notation: "1e10".
63
64 * Unary `-` can be used for negative numbers. Example: `-10` or `-0.2`
65
66 * Boolean values: `true`, `false`
67
68 * The following arithmetic operations are allowed: +, -, *, /, %.
69
70 * Function calls are in the form of `name(arg1, arg2, ...)`. Example:
71 `add(1, 5)`. Arguments can be any valid HIL expression, example:
72 `add(1, var.foo)` or even nested function calls:
73 `add(1, get("some value"))`.
74
75 * Within strings, further interpolations can be opened with `${}`.
76 Example: `"Hello ${nested}"`. A full example including the
77   original `${}` (remember this list assumes we're inside of one
78 already) could be: `foo ${func("hello ${var.foo}")}`.
79
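As a quick illustration of embedding HIL from Go, the sketch below parses
and evaluates a small interpolation using the `ast.BasicScope`,
`ast.Variable`, and `ast.Function` types from this repository. The
`hil.Parse`, `hil.EvalConfig`, and `hil.Eval` names follow the package's
documented API, but treat the exact signatures as an assumption and check
the GoDoc for the version you vendor.

    package main

    import (
        "fmt"
        "log"
        "strings"

        "github.com/hashicorp/hil"
        "github.com/hashicorp/hil/ast"
    )

    func main() {
        // Parse the interpolated string into an AST.
        tree, err := hil.Parse("hello ${upper(var.name)}")
        if err != nil {
            log.Fatal(err)
        }

        // Supply the variable and function referenced above.
        config := &hil.EvalConfig{
            GlobalScope: &ast.BasicScope{
                VarMap: map[string]ast.Variable{
                    "var.name": {Type: ast.TypeString, Value: "world"},
                },
                FuncMap: map[string]ast.Function{
                    "upper": {
                        ArgTypes:   []ast.Type{ast.TypeString},
                        ReturnType: ast.TypeString,
                        Callback: func(args []interface{}) (interface{}, error) {
                            return strings.ToUpper(args[0].(string)), nil
                        },
                    },
                },
            },
        }

        // Eval's return shape has varied between hil versions; this
        // follows the EvaluationResult form.
        result, err := hil.Eval(tree, config)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(result.Value) // hello WORLD
    }
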
80## Language Changes
81
82We've used this mini-language in Terraform for years. For backwards compatibility
83reasons, we're unlikely to make an incompatible change to the language, but
84we're not currently making that promise, either.
85
86The internal API of this project may very well change as we evolve it
87to work with more of our projects. We recommend using some sort of dependency
88management solution with this package.
89
90## Future Changes
91
92The following changes are already planned to be made at some point:
93
94 * Richer types: lists, maps, etc.
95
96 * Convert to a more standard Go parser structure similar to HCL. This
97 will improve our error messaging as well as allow us to have automatic
98 formatting.
99
100 * Allow interpolations to result in more types than just a string. While
101 within the interpolation basic types are honored, the result is always
102 a string.
diff --git a/vendor/github.com/hashicorp/hil/appveyor.yml b/vendor/github.com/hashicorp/hil/appveyor.yml
new file mode 100644
index 0000000..feaf7a3
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/appveyor.yml
@@ -0,0 +1,18 @@
1version: "build-{branch}-{build}"
2image: Visual Studio 2015
3clone_folder: c:\gopath\src\github.com\hashicorp\hil
4environment:
5 GOPATH: c:\gopath
6init:
7 - git config --global core.autocrlf true
8install:
9- cmd: >-
10 echo %Path%
11
12 go version
13
14 go env
15
16 go get -d -v -t ./...
17build_script:
18- cmd: go test -v ./...
diff --git a/vendor/github.com/hashicorp/hil/ast/arithmetic.go b/vendor/github.com/hashicorp/hil/ast/arithmetic.go
new file mode 100644
index 0000000..94dc24f
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/ast/arithmetic.go
@@ -0,0 +1,43 @@
1package ast
2
3import (
4 "bytes"
5 "fmt"
6)
7
8// Arithmetic represents a node where the result is arithmetic of
9// two or more operands in the order given.
10type Arithmetic struct {
11 Op ArithmeticOp
12 Exprs []Node
13 Posx Pos
14}
15
16func (n *Arithmetic) Accept(v Visitor) Node {
17 for i, expr := range n.Exprs {
18 n.Exprs[i] = expr.Accept(v)
19 }
20
21 return v(n)
22}
23
24func (n *Arithmetic) Pos() Pos {
25 return n.Posx
26}
27
28func (n *Arithmetic) GoString() string {
29 return fmt.Sprintf("*%#v", *n)
30}
31
32func (n *Arithmetic) String() string {
33 var b bytes.Buffer
34 for _, expr := range n.Exprs {
35 b.WriteString(fmt.Sprintf("%s", expr))
36 }
37
38 return b.String()
39}
40
41func (n *Arithmetic) Type(Scope) (Type, error) {
42 return TypeInt, nil
43}
diff --git a/vendor/github.com/hashicorp/hil/ast/arithmetic_op.go b/vendor/github.com/hashicorp/hil/ast/arithmetic_op.go
new file mode 100644
index 0000000..18880c6
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/ast/arithmetic_op.go
@@ -0,0 +1,24 @@
1package ast
2
3// ArithmeticOp is the operation to use for the math.
4type ArithmeticOp int
5
6const (
7 ArithmeticOpInvalid ArithmeticOp = 0
8
9 ArithmeticOpAdd ArithmeticOp = iota
10 ArithmeticOpSub
11 ArithmeticOpMul
12 ArithmeticOpDiv
13 ArithmeticOpMod
14
15 ArithmeticOpLogicalAnd
16 ArithmeticOpLogicalOr
17
18 ArithmeticOpEqual
19 ArithmeticOpNotEqual
20 ArithmeticOpLessThan
21 ArithmeticOpLessThanOrEqual
22 ArithmeticOpGreaterThan
23 ArithmeticOpGreaterThanOrEqual
24)
diff --git a/vendor/github.com/hashicorp/hil/ast/ast.go b/vendor/github.com/hashicorp/hil/ast/ast.go
new file mode 100644
index 0000000..c6350f8
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/ast/ast.go
@@ -0,0 +1,99 @@
1package ast
2
3import (
4 "fmt"
5)
6
7// Node is the interface that all AST nodes must implement.
8type Node interface {
9 // Accept is called to dispatch to the visitors. It must return the
10 // resulting Node (which might be different in an AST transform).
11 Accept(Visitor) Node
12
13 // Pos returns the position of this node in some source.
14 Pos() Pos
15
16 // Type returns the type of this node for the given context.
17 Type(Scope) (Type, error)
18}
19
20// Pos is the starting position of an AST node
21type Pos struct {
22 Column, Line int // Column/Line number, starting at 1
23 Filename string // Optional source filename, if known
24}
25
26func (p Pos) String() string {
27 if p.Filename == "" {
28 return fmt.Sprintf("%d:%d", p.Line, p.Column)
29 } else {
30 return fmt.Sprintf("%s:%d:%d", p.Filename, p.Line, p.Column)
31 }
32}
33
34// InitPos is an initial position value. This should be used as
35// the starting position (presets the column and line to 1).
36var InitPos = Pos{Column: 1, Line: 1}
37
38// Visitors are just implementations of this function.
39//
40// The function must return the Node to replace this node with. "nil" is
41// _not_ a valid return value. If there is no replacement, the original node
42// should be returned. We build this replacement directly into the visitor
43// pattern since AST transformations are a common and useful tool and
44// building it into the AST itself makes it required for future Node
45// implementations and very easy to do.
46//
47// Note that this isn't a true implementation of the visitor pattern, which
48// generally requires proper type dispatch on the function. However,
49// implementing this basic visitor pattern style is still very useful even
50// if you have to type switch.
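//
// For example, a visitor that replaces every VariableAccess with a string
// literal carrying the variable's name (an illustrative sketch only) could
// be written as:
//
//	var inline Visitor = func(n Node) Node {
//		if va, ok := n.(*VariableAccess); ok {
//			return &LiteralNode{Value: va.Name, Typex: TypeString, Posx: va.Pos()}
//		}
//		return n
//	}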
51type Visitor func(Node) Node
52
53//go:generate stringer -type=Type
54
55// Type is the type of any value.
56type Type uint32
57
58const (
59 TypeInvalid Type = 0
60 TypeAny Type = 1 << iota
61 TypeBool
62 TypeString
63 TypeInt
64 TypeFloat
65 TypeList
66 TypeMap
67
68 // This is a special type used by Terraform to mark "unknown" values.
69 // It is impossible for this type to be introduced into your HIL programs
70 // unless you explicitly set a variable to this value. In that case,
71 // any operation including the variable will return "TypeUnknown" as the
72 // type.
73 TypeUnknown
74)
75
76func (t Type) Printable() string {
77 switch t {
78 case TypeInvalid:
79 return "invalid type"
80 case TypeAny:
81 return "any type"
82 case TypeBool:
83 return "type bool"
84 case TypeString:
85 return "type string"
86 case TypeInt:
87 return "type int"
88 case TypeFloat:
89 return "type float"
90 case TypeList:
91 return "type list"
92 case TypeMap:
93 return "type map"
94 case TypeUnknown:
95 return "type unknown"
96 default:
97 return "unknown type"
98 }
99}
diff --git a/vendor/github.com/hashicorp/hil/ast/call.go b/vendor/github.com/hashicorp/hil/ast/call.go
new file mode 100644
index 0000000..0557011
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/ast/call.go
@@ -0,0 +1,47 @@
1package ast
2
3import (
4 "fmt"
5 "strings"
6)
7
8// Call represents a function call.
9type Call struct {
10 Func string
11 Args []Node
12 Posx Pos
13}
14
15func (n *Call) Accept(v Visitor) Node {
16 for i, a := range n.Args {
17 n.Args[i] = a.Accept(v)
18 }
19
20 return v(n)
21}
22
23func (n *Call) Pos() Pos {
24 return n.Posx
25}
26
27func (n *Call) String() string {
28 args := make([]string, len(n.Args))
29 for i, arg := range n.Args {
30 args[i] = fmt.Sprintf("%s", arg)
31 }
32
33 return fmt.Sprintf("Call(%s, %s)", n.Func, strings.Join(args, ", "))
34}
35
36func (n *Call) Type(s Scope) (Type, error) {
37 f, ok := s.LookupFunc(n.Func)
38 if !ok {
39 return TypeInvalid, fmt.Errorf("unknown function: %s", n.Func)
40 }
41
42 return f.ReturnType, nil
43}
44
45func (n *Call) GoString() string {
46 return fmt.Sprintf("*%#v", *n)
47}
diff --git a/vendor/github.com/hashicorp/hil/ast/conditional.go b/vendor/github.com/hashicorp/hil/ast/conditional.go
new file mode 100644
index 0000000..be48f89
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/ast/conditional.go
@@ -0,0 +1,36 @@
1package ast
2
3import (
4 "fmt"
5)
6
7type Conditional struct {
8 CondExpr Node
9 TrueExpr Node
10 FalseExpr Node
11 Posx Pos
12}
13
14// Accept passes the given visitor to the child nodes in this order:
15// CondExpr, TrueExpr, FalseExpr. It then finally passes itself to the visitor.
16func (n *Conditional) Accept(v Visitor) Node {
17 n.CondExpr = n.CondExpr.Accept(v)
18 n.TrueExpr = n.TrueExpr.Accept(v)
19 n.FalseExpr = n.FalseExpr.Accept(v)
20
21 return v(n)
22}
23
24func (n *Conditional) Pos() Pos {
25 return n.Posx
26}
27
28func (n *Conditional) Type(Scope) (Type, error) {
29 // This is not actually a useful value; the type checker ignores
30 // this function when analyzing conditionals, just as with Arithmetic.
31 return TypeInt, nil
32}
33
34func (n *Conditional) GoString() string {
35 return fmt.Sprintf("*%#v", *n)
36}
diff --git a/vendor/github.com/hashicorp/hil/ast/index.go b/vendor/github.com/hashicorp/hil/ast/index.go
new file mode 100644
index 0000000..860c25f
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/ast/index.go
@@ -0,0 +1,76 @@
1package ast
2
3import (
4 "fmt"
5 "strings"
6)
7
8// Index represents an indexing operation into another data structure
9type Index struct {
10 Target Node
11 Key Node
12 Posx Pos
13}
14
15func (n *Index) Accept(v Visitor) Node {
16 n.Target = n.Target.Accept(v)
17 n.Key = n.Key.Accept(v)
18 return v(n)
19}
20
21func (n *Index) Pos() Pos {
22 return n.Posx
23}
24
25func (n *Index) String() string {
26 return fmt.Sprintf("Index(%s, %s)", n.Target, n.Key)
27}
28
29func (n *Index) Type(s Scope) (Type, error) {
30 variableAccess, ok := n.Target.(*VariableAccess)
31 if !ok {
32 return TypeInvalid, fmt.Errorf("target is not a variable")
33 }
34
35 variable, ok := s.LookupVar(variableAccess.Name)
36 if !ok {
37 return TypeInvalid, fmt.Errorf("unknown variable accessed: %s", variableAccess.Name)
38 }
39
40 switch variable.Type {
41 case TypeList:
42 return n.typeList(variable, variableAccess.Name)
43 case TypeMap:
44 return n.typeMap(variable, variableAccess.Name)
45 default:
46 return TypeInvalid, fmt.Errorf("invalid index operation into non-indexable type: %s", variable.Type)
47 }
48}
49
50func (n *Index) typeList(variable Variable, variableName string) (Type, error) {
51 // We assume type checking has already determined that this is a list
52 list := variable.Value.([]Variable)
53
54 return VariableListElementTypesAreHomogenous(variableName, list)
55}
56
57func (n *Index) typeMap(variable Variable, variableName string) (Type, error) {
58 // We assume type checking has already determined that this is a map
59 vmap := variable.Value.(map[string]Variable)
60
61 return VariableMapValueTypesAreHomogenous(variableName, vmap)
62}
63
64func reportTypes(typesFound map[Type]struct{}) string {
65 stringTypes := make([]string, len(typesFound))
66 i := 0
67	for k := range typesFound {
68		stringTypes[i] = k.String()
69 i++
70 }
71 return strings.Join(stringTypes, ", ")
72}
73
74func (n *Index) GoString() string {
75 return fmt.Sprintf("*%#v", *n)
76}
diff --git a/vendor/github.com/hashicorp/hil/ast/literal.go b/vendor/github.com/hashicorp/hil/ast/literal.go
new file mode 100644
index 0000000..da6014f
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/ast/literal.go
@@ -0,0 +1,88 @@
1package ast
2
3import (
4 "fmt"
5 "reflect"
6)
7
8// LiteralNode represents a single literal value, such as "foo" or
9// 42 or 3.14159. Based on the Type, the Value can be safely cast.
10type LiteralNode struct {
11 Value interface{}
12 Typex Type
13 Posx Pos
14}
15
16// NewLiteralNode returns a new literal node representing the given
17// literal Go value, which must correspond to one of the primitive types
18// supported by HIL. Lists and maps cannot currently be constructed via
19// this function.
20//
21// If an inappropriately-typed value is provided, this function will
22// return an error. The main intended use of this function is to produce
23// "synthetic" literals from constants in code, where the value type is
24// well known at compile time. To easily store these in global variables,
25// see also MustNewLiteralNode.
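//
// For example (an illustrative sketch):
//
//	var trueNode = MustNewLiteralNode(true, InitPos)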
26func NewLiteralNode(value interface{}, pos Pos) (*LiteralNode, error) {
27 goType := reflect.TypeOf(value)
28 var hilType Type
29
30 switch goType.Kind() {
31 case reflect.Bool:
32 hilType = TypeBool
33 case reflect.Int:
34 hilType = TypeInt
35 case reflect.Float64:
36 hilType = TypeFloat
37 case reflect.String:
38 hilType = TypeString
39 default:
40 return nil, fmt.Errorf("unsupported literal node type: %T", value)
41 }
42
43 return &LiteralNode{
44 Value: value,
45 Typex: hilType,
46 Posx: pos,
47 }, nil
48}
49
50// MustNewLiteralNode wraps NewLiteralNode and panics if an error is
51// returned, thus allowing valid literal nodes to be easily assigned to
52// global variables.
53func MustNewLiteralNode(value interface{}, pos Pos) *LiteralNode {
54 node, err := NewLiteralNode(value, pos)
55 if err != nil {
56 panic(err)
57 }
58 return node
59}
60
61func (n *LiteralNode) Accept(v Visitor) Node {
62 return v(n)
63}
64
65func (n *LiteralNode) Pos() Pos {
66 return n.Posx
67}
68
69func (n *LiteralNode) GoString() string {
70 return fmt.Sprintf("*%#v", *n)
71}
72
73func (n *LiteralNode) String() string {
74 return fmt.Sprintf("Literal(%s, %v)", n.Typex, n.Value)
75}
76
77func (n *LiteralNode) Type(Scope) (Type, error) {
78 return n.Typex, nil
79}
80
81// IsUnknown returns true either if the node's value is itself unknown
82// or if it is a collection containing any unknown elements, deeply.
83func (n *LiteralNode) IsUnknown() bool {
84 return IsUnknown(Variable{
85 Type: n.Typex,
86 Value: n.Value,
87 })
88}
diff --git a/vendor/github.com/hashicorp/hil/ast/output.go b/vendor/github.com/hashicorp/hil/ast/output.go
new file mode 100644
index 0000000..1e27f97
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/ast/output.go
@@ -0,0 +1,78 @@
1package ast
2
3import (
4 "bytes"
5 "fmt"
6)
7
8// Output represents the root node of all interpolation evaluations. If the
9// output has only a single expression whose type is TypeList or TypeMap, the
10// evaluated result can be type-asserted to []interface{} or map[string]interface{}
11// respectively. Otherwise the Output evaluates as a string, concatenating
12// the evaluation of each expression.
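//
// For example (illustrative): evaluating `foo ${var.x}` yields a string,
// while evaluating `${var.list}` alone yields the underlying list.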
13type Output struct {
14 Exprs []Node
15 Posx Pos
16}
17
18func (n *Output) Accept(v Visitor) Node {
19 for i, expr := range n.Exprs {
20 n.Exprs[i] = expr.Accept(v)
21 }
22
23 return v(n)
24}
25
26func (n *Output) Pos() Pos {
27 return n.Posx
28}
29
30func (n *Output) GoString() string {
31 return fmt.Sprintf("*%#v", *n)
32}
33
34func (n *Output) String() string {
35 var b bytes.Buffer
36 for _, expr := range n.Exprs {
37 b.WriteString(fmt.Sprintf("%s", expr))
38 }
39
40 return b.String()
41}
42
43func (n *Output) Type(s Scope) (Type, error) {
44 // Special case no expressions for backward compatibility
45 if len(n.Exprs) == 0 {
46 return TypeString, nil
47 }
48
49 // Special case a single expression of types list or map
50 if len(n.Exprs) == 1 {
51 exprType, err := n.Exprs[0].Type(s)
52 if err != nil {
53 return TypeInvalid, err
54 }
55 switch exprType {
56 case TypeList:
57 return TypeList, nil
58 case TypeMap:
59 return TypeMap, nil
60 }
61 }
62
63 // Otherwise ensure all our expressions are strings
64 for index, expr := range n.Exprs {
65 exprType, err := expr.Type(s)
66 if err != nil {
67 return TypeInvalid, err
68 }
69 // We only look for things we know we can't coerce with an implicit conversion func
70 if exprType == TypeList || exprType == TypeMap {
71 return TypeInvalid, fmt.Errorf(
72 "multi-expression HIL outputs may only have string inputs: %d is type %s",
73 index, exprType)
74 }
75 }
76
77 return TypeString, nil
78}
diff --git a/vendor/github.com/hashicorp/hil/ast/scope.go b/vendor/github.com/hashicorp/hil/ast/scope.go
new file mode 100644
index 0000000..7a975d9
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/ast/scope.go
@@ -0,0 +1,90 @@
1package ast
2
3import (
4 "fmt"
5 "reflect"
6)
7
8// Scope is the interface used to look up variables and functions while
9// evaluating. How these functions/variables are defined is up to the caller.
10type Scope interface {
11 LookupFunc(string) (Function, bool)
12 LookupVar(string) (Variable, bool)
13}
14
15// Variable is a variable value for execution given as input to the engine.
16// It records the value of a variable along with its type.
17type Variable struct {
18 Value interface{}
19 Type Type
20}
21
22// NewVariable creates a new Variable for the given value. This will
23// attempt to infer the correct type. If it can't, an error will be returned.
24func NewVariable(v interface{}) (result Variable, err error) {
25 switch v := reflect.ValueOf(v); v.Kind() {
26 case reflect.String:
27 result.Type = TypeString
28 default:
29 err = fmt.Errorf("Unknown type: %s", v.Kind())
30 }
31
32 result.Value = v
33 return
34}
35
36// String implements Stringer on Variable, displaying the type and value
37// of the Variable.
38func (v Variable) String() string {
39 return fmt.Sprintf("{Variable (%s): %+v}", v.Type, v.Value)
40}
41
42// Function defines a function that can be executed by the engine.
43// The type checker will validate that arguments of the proper types
44// are passed to the callback.
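//
// For example (an illustrative sketch), a function that lowercases its
// single string argument could be defined as:
//
//	lower := Function{
//		ArgTypes:   []Type{TypeString},
//		ReturnType: TypeString,
//		Callback: func(args []interface{}) (interface{}, error) {
//			return strings.ToLower(args[0].(string)), nil
//		},
//	}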
45type Function struct {
46 // ArgTypes is the list of types in argument order. These are the
47 // required arguments.
48 //
49 // ReturnType is the type of the returned value. The Callback MUST
50 // return this type.
51 ArgTypes []Type
52 ReturnType Type
53
54 // Variadic, if true, says that this function is variadic, meaning
55 // it takes a variable number of arguments. In this case, the
56 // VariadicType must be set.
57 Variadic bool
58 VariadicType Type
59
60 // Callback is the function called for a function. The argument
61 // types are guaranteed to match the spec above by the type checker.
62	// The length of the args is strictly == len(ArgTypes) unless Variadic
63	// is true, in which case it's >= len(ArgTypes).
64 Callback func([]interface{}) (interface{}, error)
65}
66
67// BasicScope is a simple scope that looks up variables and functions
68// using a map.
69type BasicScope struct {
70 FuncMap map[string]Function
71 VarMap map[string]Variable
72}
73
74func (s *BasicScope) LookupFunc(n string) (Function, bool) {
75 if s == nil {
76 return Function{}, false
77 }
78
79 v, ok := s.FuncMap[n]
80 return v, ok
81}
82
83func (s *BasicScope) LookupVar(n string) (Variable, bool) {
84 if s == nil {
85 return Variable{}, false
86 }
87
88 v, ok := s.VarMap[n]
89 return v, ok
90}
diff --git a/vendor/github.com/hashicorp/hil/ast/stack.go b/vendor/github.com/hashicorp/hil/ast/stack.go
new file mode 100644
index 0000000..bd2bc15
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/ast/stack.go
@@ -0,0 +1,25 @@
1package ast
2
3// Stack is a stack of Node.
4type Stack struct {
5 stack []Node
6}
7
8func (s *Stack) Len() int {
9 return len(s.stack)
10}
11
12func (s *Stack) Push(n Node) {
13 s.stack = append(s.stack, n)
14}
15
16func (s *Stack) Pop() Node {
17 x := s.stack[len(s.stack)-1]
18 s.stack[len(s.stack)-1] = nil
19 s.stack = s.stack[:len(s.stack)-1]
20 return x
21}
22
23func (s *Stack) Reset() {
24 s.stack = nil
25}
diff --git a/vendor/github.com/hashicorp/hil/ast/type_string.go b/vendor/github.com/hashicorp/hil/ast/type_string.go
new file mode 100644
index 0000000..1f51a98
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/ast/type_string.go
@@ -0,0 +1,54 @@
1// Code generated by "stringer -type=Type"; DO NOT EDIT
2
3package ast
4
5import "fmt"
6
7const (
8 _Type_name_0 = "TypeInvalid"
9 _Type_name_1 = "TypeAny"
10 _Type_name_2 = "TypeBool"
11 _Type_name_3 = "TypeString"
12 _Type_name_4 = "TypeInt"
13 _Type_name_5 = "TypeFloat"
14 _Type_name_6 = "TypeList"
15 _Type_name_7 = "TypeMap"
16 _Type_name_8 = "TypeUnknown"
17)
18
19var (
20 _Type_index_0 = [...]uint8{0, 11}
21 _Type_index_1 = [...]uint8{0, 7}
22 _Type_index_2 = [...]uint8{0, 8}
23 _Type_index_3 = [...]uint8{0, 10}
24 _Type_index_4 = [...]uint8{0, 7}
25 _Type_index_5 = [...]uint8{0, 9}
26 _Type_index_6 = [...]uint8{0, 8}
27 _Type_index_7 = [...]uint8{0, 7}
28 _Type_index_8 = [...]uint8{0, 11}
29)
30
31func (i Type) String() string {
32 switch {
33 case i == 0:
34 return _Type_name_0
35 case i == 2:
36 return _Type_name_1
37 case i == 4:
38 return _Type_name_2
39 case i == 8:
40 return _Type_name_3
41 case i == 16:
42 return _Type_name_4
43 case i == 32:
44 return _Type_name_5
45 case i == 64:
46 return _Type_name_6
47 case i == 128:
48 return _Type_name_7
49 case i == 256:
50 return _Type_name_8
51 default:
52 return fmt.Sprintf("Type(%d)", i)
53 }
54}
diff --git a/vendor/github.com/hashicorp/hil/ast/unknown.go b/vendor/github.com/hashicorp/hil/ast/unknown.go
new file mode 100644
index 0000000..d6ddaec
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/ast/unknown.go
@@ -0,0 +1,30 @@
1package ast
2
3// IsUnknown reports whether a variable is unknown or contains any value
4// that is unknown. This will recurse into lists and maps and so on.
5func IsUnknown(v Variable) bool {
6 // If it is unknown itself, return true
7 if v.Type == TypeUnknown {
8 return true
9 }
10
11 // If it is a container type, check the values
12 switch v.Type {
13 case TypeList:
14 for _, el := range v.Value.([]Variable) {
15 if IsUnknown(el) {
16 return true
17 }
18 }
19 case TypeMap:
20 for _, el := range v.Value.(map[string]Variable) {
21 if IsUnknown(el) {
22 return true
23 }
24 }
25 default:
26 }
27
28	// Not a container type, or it survived the above checks
29 return false
30}
diff --git a/vendor/github.com/hashicorp/hil/ast/variable_access.go b/vendor/github.com/hashicorp/hil/ast/variable_access.go
new file mode 100644
index 0000000..4c1362d
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/ast/variable_access.go
@@ -0,0 +1,36 @@
1package ast
2
3import (
4 "fmt"
5)
6
7// VariableAccess represents a variable access.
8type VariableAccess struct {
9 Name string
10 Posx Pos
11}
12
13func (n *VariableAccess) Accept(v Visitor) Node {
14 return v(n)
15}
16
17func (n *VariableAccess) Pos() Pos {
18 return n.Posx
19}
20
21func (n *VariableAccess) GoString() string {
22 return fmt.Sprintf("*%#v", *n)
23}
24
25func (n *VariableAccess) String() string {
26 return fmt.Sprintf("Variable(%s)", n.Name)
27}
28
29func (n *VariableAccess) Type(s Scope) (Type, error) {
30 v, ok := s.LookupVar(n.Name)
31 if !ok {
32 return TypeInvalid, fmt.Errorf("unknown variable: %s", n.Name)
33 }
34
35 return v.Type, nil
36}
diff --git a/vendor/github.com/hashicorp/hil/ast/variables_helper.go b/vendor/github.com/hashicorp/hil/ast/variables_helper.go
new file mode 100644
index 0000000..06bd18d
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/ast/variables_helper.go
@@ -0,0 +1,63 @@
1package ast
2
3import "fmt"
4
5func VariableListElementTypesAreHomogenous(variableName string, list []Variable) (Type, error) {
6 if len(list) == 0 {
7 return TypeInvalid, fmt.Errorf("list %q does not have any elements so cannot determine type.", variableName)
8 }
9
10 elemType := TypeUnknown
11 for _, v := range list {
12 if v.Type == TypeUnknown {
13 continue
14 }
15
16 if elemType == TypeUnknown {
17 elemType = v.Type
18 continue
19 }
20
21 if v.Type != elemType {
22 return TypeInvalid, fmt.Errorf(
23 "list %q does not have homogenous types. found %s and then %s",
24 variableName,
25 elemType, v.Type,
26 )
27 }
28
29 elemType = v.Type
30 }
31
32 return elemType, nil
33}
34
35func VariableMapValueTypesAreHomogenous(variableName string, vmap map[string]Variable) (Type, error) {
36 if len(vmap) == 0 {
37 return TypeInvalid, fmt.Errorf("map %q does not have any elements so cannot determine type.", variableName)
38 }
39
40 elemType := TypeUnknown
41 for _, v := range vmap {
42 if v.Type == TypeUnknown {
43 continue
44 }
45
46 if elemType == TypeUnknown {
47 elemType = v.Type
48 continue
49 }
50
51 if v.Type != elemType {
52 return TypeInvalid, fmt.Errorf(
53 "map %q does not have homogenous types. found %s and then %s",
54 variableName,
55 elemType, v.Type,
56 )
57 }
58
59 elemType = v.Type
60 }
61
62 return elemType, nil
63}
diff --git a/vendor/github.com/hashicorp/hil/builtins.go b/vendor/github.com/hashicorp/hil/builtins.go
new file mode 100644
index 0000000..909c788
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/builtins.go
@@ -0,0 +1,331 @@
1package hil
2
3import (
4 "errors"
5 "strconv"
6
7 "github.com/hashicorp/hil/ast"
8)
9
10// NOTE: All builtins are tested in engine_test.go
11
12func registerBuiltins(scope *ast.BasicScope) *ast.BasicScope {
13 if scope == nil {
14 scope = new(ast.BasicScope)
15 }
16 if scope.FuncMap == nil {
17 scope.FuncMap = make(map[string]ast.Function)
18 }
19
20 // Implicit conversions
21 scope.FuncMap["__builtin_BoolToString"] = builtinBoolToString()
22 scope.FuncMap["__builtin_FloatToInt"] = builtinFloatToInt()
23 scope.FuncMap["__builtin_FloatToString"] = builtinFloatToString()
24 scope.FuncMap["__builtin_IntToFloat"] = builtinIntToFloat()
25 scope.FuncMap["__builtin_IntToString"] = builtinIntToString()
26 scope.FuncMap["__builtin_StringToInt"] = builtinStringToInt()
27 scope.FuncMap["__builtin_StringToFloat"] = builtinStringToFloat()
28 scope.FuncMap["__builtin_StringToBool"] = builtinStringToBool()
29
30 // Math operations
31 scope.FuncMap["__builtin_IntMath"] = builtinIntMath()
32 scope.FuncMap["__builtin_FloatMath"] = builtinFloatMath()
33 scope.FuncMap["__builtin_BoolCompare"] = builtinBoolCompare()
34 scope.FuncMap["__builtin_FloatCompare"] = builtinFloatCompare()
35 scope.FuncMap["__builtin_IntCompare"] = builtinIntCompare()
36 scope.FuncMap["__builtin_StringCompare"] = builtinStringCompare()
37 scope.FuncMap["__builtin_Logical"] = builtinLogical()
38 return scope
39}
40
41func builtinFloatMath() ast.Function {
42 return ast.Function{
43 ArgTypes: []ast.Type{ast.TypeInt},
44 Variadic: true,
45 VariadicType: ast.TypeFloat,
46 ReturnType: ast.TypeFloat,
47 Callback: func(args []interface{}) (interface{}, error) {
48 op := args[0].(ast.ArithmeticOp)
49 result := args[1].(float64)
50 for _, raw := range args[2:] {
51 arg := raw.(float64)
52 switch op {
53 case ast.ArithmeticOpAdd:
54 result += arg
55 case ast.ArithmeticOpSub:
56 result -= arg
57 case ast.ArithmeticOpMul:
58 result *= arg
59 case ast.ArithmeticOpDiv:
60 result /= arg
61 }
62 }
63
64 return result, nil
65 },
66 }
67}
68
69func builtinIntMath() ast.Function {
70 return ast.Function{
71 ArgTypes: []ast.Type{ast.TypeInt},
72 Variadic: true,
73 VariadicType: ast.TypeInt,
74 ReturnType: ast.TypeInt,
75 Callback: func(args []interface{}) (interface{}, error) {
76 op := args[0].(ast.ArithmeticOp)
77 result := args[1].(int)
78 for _, raw := range args[2:] {
79 arg := raw.(int)
80 switch op {
81 case ast.ArithmeticOpAdd:
82 result += arg
83 case ast.ArithmeticOpSub:
84 result -= arg
85 case ast.ArithmeticOpMul:
86 result *= arg
87 case ast.ArithmeticOpDiv:
88 if arg == 0 {
89 return nil, errors.New("divide by zero")
90 }
91
92 result /= arg
93 case ast.ArithmeticOpMod:
94 if arg == 0 {
95 return nil, errors.New("divide by zero")
96 }
97
98 result = result % arg
99 }
100 }
101
102 return result, nil
103 },
104 }
105}
106
107func builtinBoolCompare() ast.Function {
108 return ast.Function{
109 ArgTypes: []ast.Type{ast.TypeInt, ast.TypeBool, ast.TypeBool},
110 Variadic: false,
111 ReturnType: ast.TypeBool,
112 Callback: func(args []interface{}) (interface{}, error) {
113 op := args[0].(ast.ArithmeticOp)
114 lhs := args[1].(bool)
115 rhs := args[2].(bool)
116
117 switch op {
118 case ast.ArithmeticOpEqual:
119 return lhs == rhs, nil
120 case ast.ArithmeticOpNotEqual:
121 return lhs != rhs, nil
122 default:
123 return nil, errors.New("invalid comparison operation")
124 }
125 },
126 }
127}
128
129func builtinFloatCompare() ast.Function {
130 return ast.Function{
131 ArgTypes: []ast.Type{ast.TypeInt, ast.TypeFloat, ast.TypeFloat},
132 Variadic: false,
133 ReturnType: ast.TypeBool,
134 Callback: func(args []interface{}) (interface{}, error) {
135 op := args[0].(ast.ArithmeticOp)
136 lhs := args[1].(float64)
137 rhs := args[2].(float64)
138
139 switch op {
140 case ast.ArithmeticOpEqual:
141 return lhs == rhs, nil
142 case ast.ArithmeticOpNotEqual:
143 return lhs != rhs, nil
144 case ast.ArithmeticOpLessThan:
145 return lhs < rhs, nil
146 case ast.ArithmeticOpLessThanOrEqual:
147 return lhs <= rhs, nil
148 case ast.ArithmeticOpGreaterThan:
149 return lhs > rhs, nil
150 case ast.ArithmeticOpGreaterThanOrEqual:
151 return lhs >= rhs, nil
152 default:
153 return nil, errors.New("invalid comparison operation")
154 }
155 },
156 }
157}
158
159func builtinIntCompare() ast.Function {
160 return ast.Function{
161 ArgTypes: []ast.Type{ast.TypeInt, ast.TypeInt, ast.TypeInt},
162 Variadic: false,
163 ReturnType: ast.TypeBool,
164 Callback: func(args []interface{}) (interface{}, error) {
165 op := args[0].(ast.ArithmeticOp)
166 lhs := args[1].(int)
167 rhs := args[2].(int)
168
169 switch op {
170 case ast.ArithmeticOpEqual:
171 return lhs == rhs, nil
172 case ast.ArithmeticOpNotEqual:
173 return lhs != rhs, nil
174 case ast.ArithmeticOpLessThan:
175 return lhs < rhs, nil
176 case ast.ArithmeticOpLessThanOrEqual:
177 return lhs <= rhs, nil
178 case ast.ArithmeticOpGreaterThan:
179 return lhs > rhs, nil
180 case ast.ArithmeticOpGreaterThanOrEqual:
181 return lhs >= rhs, nil
182 default:
183 return nil, errors.New("invalid comparison operation")
184 }
185 },
186 }
187}
188
189func builtinStringCompare() ast.Function {
190 return ast.Function{
191 ArgTypes: []ast.Type{ast.TypeInt, ast.TypeString, ast.TypeString},
192 Variadic: false,
193 ReturnType: ast.TypeBool,
194 Callback: func(args []interface{}) (interface{}, error) {
195 op := args[0].(ast.ArithmeticOp)
196 lhs := args[1].(string)
197 rhs := args[2].(string)
198
199 switch op {
200 case ast.ArithmeticOpEqual:
201 return lhs == rhs, nil
202 case ast.ArithmeticOpNotEqual:
203 return lhs != rhs, nil
204 default:
205 return nil, errors.New("invalid comparison operation")
206 }
207 },
208 }
209}
210
211func builtinLogical() ast.Function {
212 return ast.Function{
213 ArgTypes: []ast.Type{ast.TypeInt},
214 Variadic: true,
215 VariadicType: ast.TypeBool,
216 ReturnType: ast.TypeBool,
217 Callback: func(args []interface{}) (interface{}, error) {
218 op := args[0].(ast.ArithmeticOp)
219 result := args[1].(bool)
220 for _, raw := range args[2:] {
221 arg := raw.(bool)
222 switch op {
223 case ast.ArithmeticOpLogicalOr:
224 result = result || arg
225 case ast.ArithmeticOpLogicalAnd:
226 result = result && arg
227 default:
228 return nil, errors.New("invalid logical operator")
229 }
230 }
231
232 return result, nil
233 },
234 }
235}
236
237func builtinFloatToInt() ast.Function {
238 return ast.Function{
239 ArgTypes: []ast.Type{ast.TypeFloat},
240 ReturnType: ast.TypeInt,
241 Callback: func(args []interface{}) (interface{}, error) {
242 return int(args[0].(float64)), nil
243 },
244 }
245}
246
247func builtinFloatToString() ast.Function {
248 return ast.Function{
249 ArgTypes: []ast.Type{ast.TypeFloat},
250 ReturnType: ast.TypeString,
251 Callback: func(args []interface{}) (interface{}, error) {
252 return strconv.FormatFloat(
253 args[0].(float64), 'g', -1, 64), nil
254 },
255 }
256}
257
258func builtinIntToFloat() ast.Function {
259 return ast.Function{
260 ArgTypes: []ast.Type{ast.TypeInt},
261 ReturnType: ast.TypeFloat,
262 Callback: func(args []interface{}) (interface{}, error) {
263 return float64(args[0].(int)), nil
264 },
265 }
266}
267
268func builtinIntToString() ast.Function {
269 return ast.Function{
270 ArgTypes: []ast.Type{ast.TypeInt},
271 ReturnType: ast.TypeString,
272 Callback: func(args []interface{}) (interface{}, error) {
273 return strconv.FormatInt(int64(args[0].(int)), 10), nil
274 },
275 }
276}
277
278func builtinStringToInt() ast.Function {
279 return ast.Function{
280		ArgTypes:   []ast.Type{ast.TypeString},
281		ReturnType: ast.TypeInt,
282 Callback: func(args []interface{}) (interface{}, error) {
283 v, err := strconv.ParseInt(args[0].(string), 0, 0)
284 if err != nil {
285 return nil, err
286 }
287
288 return int(v), nil
289 },
290 }
291}
292
293func builtinStringToFloat() ast.Function {
294 return ast.Function{
295 ArgTypes: []ast.Type{ast.TypeString},
296 ReturnType: ast.TypeFloat,
297 Callback: func(args []interface{}) (interface{}, error) {
298 v, err := strconv.ParseFloat(args[0].(string), 64)
299 if err != nil {
300 return nil, err
301 }
302
303 return v, nil
304 },
305 }
306}
307
308func builtinBoolToString() ast.Function {
309 return ast.Function{
310 ArgTypes: []ast.Type{ast.TypeBool},
311 ReturnType: ast.TypeString,
312 Callback: func(args []interface{}) (interface{}, error) {
313 return strconv.FormatBool(args[0].(bool)), nil
314 },
315 }
316}
317
318func builtinStringToBool() ast.Function {
319 return ast.Function{
320 ArgTypes: []ast.Type{ast.TypeString},
321 ReturnType: ast.TypeBool,
322 Callback: func(args []interface{}) (interface{}, error) {
323 v, err := strconv.ParseBool(args[0].(string))
324 if err != nil {
325 return nil, err
326 }
327
328 return v, nil
329 },
330 }
331}
diff --git a/vendor/github.com/hashicorp/hil/check_identifier.go b/vendor/github.com/hashicorp/hil/check_identifier.go
new file mode 100644
index 0000000..474f505
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/check_identifier.go
@@ -0,0 +1,88 @@
1package hil
2
3import (
4 "fmt"
5 "sync"
6
7 "github.com/hashicorp/hil/ast"
8)
9
10// IdentifierCheck is a SemanticCheck that checks that all identifiers
11// resolve properly and that the right number of arguments are passed
12// to functions.
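//
// Usage (an illustrative sketch, where scope is any ast.Scope and tree is
// a previously parsed ast.Node):
//
//	check := &IdentifierCheck{Scope: scope}
//	if err := check.Visit(tree); err != nil {
//		// an unknown identifier or wrong argument count was found
//	}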
13type IdentifierCheck struct {
14 Scope ast.Scope
15
16 err error
17 lock sync.Mutex
18}
19
20func (c *IdentifierCheck) Visit(root ast.Node) error {
21 c.lock.Lock()
22 defer c.lock.Unlock()
23 defer c.reset()
24 root.Accept(c.visit)
25 return c.err
26}
27
28func (c *IdentifierCheck) visit(raw ast.Node) ast.Node {
29 if c.err != nil {
30 return raw
31 }
32
33 switch n := raw.(type) {
34 case *ast.Call:
35 c.visitCall(n)
36 case *ast.VariableAccess:
37 c.visitVariableAccess(n)
38 case *ast.Output:
39 // Ignore
40 case *ast.LiteralNode:
41 // Ignore
42 default:
43 // Ignore
44 }
45
46 // We never do replacement with this visitor
47 return raw
48}
49
50func (c *IdentifierCheck) visitCall(n *ast.Call) {
51 // Look up the function in the map
52 function, ok := c.Scope.LookupFunc(n.Func)
53 if !ok {
54 c.createErr(n, fmt.Sprintf("unknown function called: %s", n.Func))
55 return
56 }
57
58 // Break up the args into what is variadic and what is required
59 args := n.Args
60 if function.Variadic && len(args) > len(function.ArgTypes) {
61 args = n.Args[:len(function.ArgTypes)]
62 }
63
64 // Verify the number of arguments
65 if len(args) != len(function.ArgTypes) {
66 c.createErr(n, fmt.Sprintf(
67 "%s: expected %d arguments, got %d",
68 n.Func, len(function.ArgTypes), len(n.Args)))
69 return
70 }
71}
72
73func (c *IdentifierCheck) visitVariableAccess(n *ast.VariableAccess) {
74 // Look up the variable in the map
75 if _, ok := c.Scope.LookupVar(n.Name); !ok {
76 c.createErr(n, fmt.Sprintf(
77 "unknown variable accessed: %s", n.Name))
78 return
79 }
80}
81
82func (c *IdentifierCheck) createErr(n ast.Node, str string) {
83 c.err = fmt.Errorf("%s: %s", n.Pos(), str)
84}
85
86func (c *IdentifierCheck) reset() {
87 c.err = nil
88}
diff --git a/vendor/github.com/hashicorp/hil/check_types.go b/vendor/github.com/hashicorp/hil/check_types.go
new file mode 100644
index 0000000..f16da39
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/check_types.go
@@ -0,0 +1,668 @@
1package hil
2
3import (
4 "fmt"
5 "sync"
6
7 "github.com/hashicorp/hil/ast"
8)
9
10// TypeCheck implements ast.Visitor for type checking an AST tree.
11// It requires some configuration to look up the type of nodes.
12//
13// It also optionally will not type error and will instead insert implicit
14// type conversions for specific types if specified by the Implicit
15// field. Note that this is kind of organizationally weird to put into
16// this structure but we'd rather do that than duplicate the type checking
17// logic multiple times.
18type TypeCheck struct {
19 Scope ast.Scope
20
21 // Implicit is a map of implicit type conversions that we can do,
22 // and that shouldn't error. The key of the first map is the from type,
23 // the key of the second map is the to type, and the final string
24 // value is the function to call (which must be registered in the Scope).
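	//
	// For example (an illustrative sketch), to allow ints to be used where
	// strings are expected via the conversion registered by this package:
	//
	//	Implicit: map[ast.Type]map[ast.Type]string{
	//		ast.TypeInt: {ast.TypeString: "__builtin_IntToString"},
	//	}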
25 Implicit map[ast.Type]map[ast.Type]string
26
27 // Stack of types. This shouldn't be used directly except by implementations
28 // of TypeCheckNode.
29 Stack []ast.Type
30
31 err error
32 lock sync.Mutex
33}
34
35// TypeCheckNode is the interface that must be implemented by any
36// ast.Node that wants to support type-checking. If the type checker
37// encounters a node that doesn't implement this, it will error.
38type TypeCheckNode interface {
39 TypeCheck(*TypeCheck) (ast.Node, error)
40}
41
42func (v *TypeCheck) Visit(root ast.Node) error {
43 v.lock.Lock()
44 defer v.lock.Unlock()
45 defer v.reset()
46 root.Accept(v.visit)
47
48 // If the resulting type is unknown, then just let the whole thing go.
49 if v.err == errExitUnknown {
50 v.err = nil
51 }
52
53 return v.err
54}
55
56func (v *TypeCheck) visit(raw ast.Node) ast.Node {
57 if v.err != nil {
58 return raw
59 }
60
61 var result ast.Node
62 var err error
63 switch n := raw.(type) {
64 case *ast.Arithmetic:
65 tc := &typeCheckArithmetic{n}
66 result, err = tc.TypeCheck(v)
67 case *ast.Call:
68 tc := &typeCheckCall{n}
69 result, err = tc.TypeCheck(v)
70 case *ast.Conditional:
71 tc := &typeCheckConditional{n}
72 result, err = tc.TypeCheck(v)
73 case *ast.Index:
74 tc := &typeCheckIndex{n}
75 result, err = tc.TypeCheck(v)
76 case *ast.Output:
77 tc := &typeCheckOutput{n}
78 result, err = tc.TypeCheck(v)
79 case *ast.LiteralNode:
80 tc := &typeCheckLiteral{n}
81 result, err = tc.TypeCheck(v)
82 case *ast.VariableAccess:
83 tc := &typeCheckVariableAccess{n}
84 result, err = tc.TypeCheck(v)
85 default:
86 tc, ok := raw.(TypeCheckNode)
87 if !ok {
88 err = fmt.Errorf("unknown node for type check: %#v", raw)
89 break
90 }
91
92 result, err = tc.TypeCheck(v)
93 }
94
95 if err != nil {
96 pos := raw.Pos()
97 v.err = fmt.Errorf("At column %d, line %d: %s",
98 pos.Column, pos.Line, err)
99 }
100
101 return result
102}
103
104type typeCheckArithmetic struct {
105 n *ast.Arithmetic
106}
107
108func (tc *typeCheckArithmetic) TypeCheck(v *TypeCheck) (ast.Node, error) {
109 // The arguments are on the stack in reverse order, so pop them off.
110 exprs := make([]ast.Type, len(tc.n.Exprs))
111	for i := range tc.n.Exprs {
112 exprs[len(tc.n.Exprs)-1-i] = v.StackPop()
113 }
114
115 // If any operand is unknown then our result is automatically unknown
116 for _, ty := range exprs {
117 if ty == ast.TypeUnknown {
118 v.StackPush(ast.TypeUnknown)
119 return tc.n, nil
120 }
121 }
122
123 switch tc.n.Op {
124 case ast.ArithmeticOpLogicalAnd, ast.ArithmeticOpLogicalOr:
125 return tc.checkLogical(v, exprs)
126 case ast.ArithmeticOpEqual, ast.ArithmeticOpNotEqual,
127 ast.ArithmeticOpLessThan, ast.ArithmeticOpGreaterThan,
128 ast.ArithmeticOpGreaterThanOrEqual, ast.ArithmeticOpLessThanOrEqual:
129 return tc.checkComparison(v, exprs)
130 default:
131 return tc.checkNumeric(v, exprs)
132 }
133
134}
135
136func (tc *typeCheckArithmetic) checkNumeric(v *TypeCheck, exprs []ast.Type) (ast.Node, error) {
137 // Determine the resulting type we want. We do this by going over
138 // every expression until we find one with a type we recognize.
139 // We do this because the first expr might be a string ("var.foo")
140 // and we need to know what to implicit to.
141 mathFunc := "__builtin_IntMath"
142 mathType := ast.TypeInt
143 for _, v := range exprs {
144 // We assume int math but if we find ANY float, the entire
145 // expression turns into floating point math.
146 if v == ast.TypeFloat {
147 mathFunc = "__builtin_FloatMath"
148 mathType = v
149 break
150 }
151 }
152
153 // Verify the args
154 for i, arg := range exprs {
155 if arg != mathType {
156 cn := v.ImplicitConversion(exprs[i], mathType, tc.n.Exprs[i])
157 if cn != nil {
158 tc.n.Exprs[i] = cn
159 continue
160 }
161
162 return nil, fmt.Errorf(
163 "operand %d should be %s, got %s",
164 i+1, mathType, arg)
165 }
166 }
167
168 // Modulo doesn't work for floats
169 if mathType == ast.TypeFloat && tc.n.Op == ast.ArithmeticOpMod {
170 return nil, fmt.Errorf("modulo cannot be used with floats")
171 }
172
173 // Return type
174 v.StackPush(mathType)
175
176 // Replace our node with a call to the proper function. This isn't
177 // type checked but we already verified types.
178 args := make([]ast.Node, len(tc.n.Exprs)+1)
179 args[0] = &ast.LiteralNode{
180 Value: tc.n.Op,
181 Typex: ast.TypeInt,
182 Posx: tc.n.Pos(),
183 }
184 copy(args[1:], tc.n.Exprs)
185 return &ast.Call{
186 Func: mathFunc,
187 Args: args,
188 Posx: tc.n.Pos(),
189 }, nil
190}
191
192func (tc *typeCheckArithmetic) checkComparison(v *TypeCheck, exprs []ast.Type) (ast.Node, error) {
193 if len(exprs) != 2 {
194 // This should never happen, because the parser never produces
195 // nodes that violate this.
196 return nil, fmt.Errorf(
197 "comparison operators must have exactly two operands",
198 )
199 }
200
201 // The first operand always dictates the type for a comparison.
202 compareFunc := ""
203 compareType := exprs[0]
204 switch compareType {
205 case ast.TypeBool:
206 compareFunc = "__builtin_BoolCompare"
207 case ast.TypeFloat:
208 compareFunc = "__builtin_FloatCompare"
209 case ast.TypeInt:
210 compareFunc = "__builtin_IntCompare"
211 case ast.TypeString:
212 compareFunc = "__builtin_StringCompare"
213 default:
214 return nil, fmt.Errorf(
215 "comparison operators apply only to bool, float, int, and string",
216 )
217 }
218
219	// For non-equality comparisons, we will do implicit conversions to
220	// numeric types if possible. In this case, we need to go through and
221 // determine the type of comparison we're doing to enable the implicit
222 // conversion.
223 if tc.n.Op != ast.ArithmeticOpEqual && tc.n.Op != ast.ArithmeticOpNotEqual {
224 compareFunc = "__builtin_IntCompare"
225 compareType = ast.TypeInt
226 for _, expr := range exprs {
227 if expr == ast.TypeFloat {
228 compareFunc = "__builtin_FloatCompare"
229 compareType = ast.TypeFloat
230 break
231 }
232 }
233 }
234
235 // Verify (and possibly, convert) the args
236 for i, arg := range exprs {
237 if arg != compareType {
238 cn := v.ImplicitConversion(exprs[i], compareType, tc.n.Exprs[i])
239 if cn != nil {
240 tc.n.Exprs[i] = cn
241 continue
242 }
243
244 return nil, fmt.Errorf(
245 "operand %d should be %s, got %s",
246 i+1, compareType, arg,
247 )
248 }
249 }
250
251 // Only ints and floats can have the <, >, <= and >= operators applied
252 switch tc.n.Op {
253 case ast.ArithmeticOpEqual, ast.ArithmeticOpNotEqual:
254 // anything goes
255 default:
256 switch compareType {
257 case ast.TypeFloat, ast.TypeInt:
258 // fine
259 default:
260 return nil, fmt.Errorf(
261 "<, >, <= and >= may apply only to int and float values",
262 )
263 }
264 }
265
266 // Comparison operators always return bool
267 v.StackPush(ast.TypeBool)
268
269 // Replace our node with a call to the proper function. This isn't
270 // type checked but we already verified types.
271 args := make([]ast.Node, len(tc.n.Exprs)+1)
272 args[0] = &ast.LiteralNode{
273 Value: tc.n.Op,
274 Typex: ast.TypeInt,
275 Posx: tc.n.Pos(),
276 }
277 copy(args[1:], tc.n.Exprs)
278 return &ast.Call{
279 Func: compareFunc,
280 Args: args,
281 Posx: tc.n.Pos(),
282 }, nil
283}
284
285func (tc *typeCheckArithmetic) checkLogical(v *TypeCheck, exprs []ast.Type) (ast.Node, error) {
286 for i, t := range exprs {
287 if t != ast.TypeBool {
288 cn := v.ImplicitConversion(t, ast.TypeBool, tc.n.Exprs[i])
289 if cn == nil {
290 return nil, fmt.Errorf(
291 "logical operators require boolean operands, not %s",
292 t,
293 )
294 }
295 tc.n.Exprs[i] = cn
296 }
297 }
298
299 // Return type is always boolean
300 v.StackPush(ast.TypeBool)
301
302 // Arithmetic nodes are replaced with a call to a built-in function
303 args := make([]ast.Node, len(tc.n.Exprs)+1)
304 args[0] = &ast.LiteralNode{
305 Value: tc.n.Op,
306 Typex: ast.TypeInt,
307 Posx: tc.n.Pos(),
308 }
309 copy(args[1:], tc.n.Exprs)
310 return &ast.Call{
311 Func: "__builtin_Logical",
312 Args: args,
313 Posx: tc.n.Pos(),
314 }, nil
315}
316
317type typeCheckCall struct {
318 n *ast.Call
319}
320
321func (tc *typeCheckCall) TypeCheck(v *TypeCheck) (ast.Node, error) {
322 // Look up the function in the map
323 function, ok := v.Scope.LookupFunc(tc.n.Func)
324 if !ok {
325 return nil, fmt.Errorf("unknown function called: %s", tc.n.Func)
326 }
327
328 // The arguments are on the stack in reverse order, so pop them off.
329 args := make([]ast.Type, len(tc.n.Args))
330	for i := range tc.n.Args {
331 args[len(tc.n.Args)-1-i] = v.StackPop()
332 }
333
334 // Verify the args
335 for i, expected := range function.ArgTypes {
336 if expected == ast.TypeAny {
337 continue
338 }
339
340 if args[i] == ast.TypeUnknown {
341 v.StackPush(ast.TypeUnknown)
342 return tc.n, nil
343 }
344
345 if args[i] != expected {
346 cn := v.ImplicitConversion(args[i], expected, tc.n.Args[i])
347 if cn != nil {
348 tc.n.Args[i] = cn
349 continue
350 }
351
352 return nil, fmt.Errorf(
353 "%s: argument %d should be %s, got %s",
354 tc.n.Func, i+1, expected.Printable(), args[i].Printable())
355 }
356 }
357
358 // If we're variadic, then verify the types there
359 if function.Variadic && function.VariadicType != ast.TypeAny {
360 args = args[len(function.ArgTypes):]
361 for i, t := range args {
362 if t == ast.TypeUnknown {
363 v.StackPush(ast.TypeUnknown)
364 return tc.n, nil
365 }
366
367 if t != function.VariadicType {
368 realI := i + len(function.ArgTypes)
369 cn := v.ImplicitConversion(
370 t, function.VariadicType, tc.n.Args[realI])
371 if cn != nil {
372 tc.n.Args[realI] = cn
373 continue
374 }
375
376 return nil, fmt.Errorf(
377 "%s: argument %d should be %s, got %s",
378 tc.n.Func, realI,
379 function.VariadicType.Printable(), t.Printable())
380 }
381 }
382 }
383
384 // Return type
385 v.StackPush(function.ReturnType)
386
387 return tc.n, nil
388}
389
390type typeCheckConditional struct {
391 n *ast.Conditional
392}
393
394func (tc *typeCheckConditional) TypeCheck(v *TypeCheck) (ast.Node, error) {
395 // On the stack we have the types of the condition, true and false
396 // expressions, but they are in reverse order.
397 falseType := v.StackPop()
398 trueType := v.StackPop()
399 condType := v.StackPop()
400
401 if condType == ast.TypeUnknown {
402 v.StackPush(ast.TypeUnknown)
403 return tc.n, nil
404 }
405
406 if condType != ast.TypeBool {
407 cn := v.ImplicitConversion(condType, ast.TypeBool, tc.n.CondExpr)
408 if cn == nil {
409 return nil, fmt.Errorf(
410 "condition must be type bool, not %s", condType.Printable(),
411 )
412 }
413 tc.n.CondExpr = cn
414 }
415
416 // The types of the true and false expression must match
417 if trueType != falseType && trueType != ast.TypeUnknown && falseType != ast.TypeUnknown {
418
419 // Since passing around stringified versions of other types is
420 // common, we pragmatically allow the false expression to dictate
421 // the result type when the true expression is a string.
422 if trueType == ast.TypeString {
423 cn := v.ImplicitConversion(trueType, falseType, tc.n.TrueExpr)
424 if cn == nil {
425 return nil, fmt.Errorf(
426 "true and false expression types must match; have %s and %s",
427 trueType.Printable(), falseType.Printable(),
428 )
429 }
430 tc.n.TrueExpr = cn
431 trueType = falseType
432 } else {
433 cn := v.ImplicitConversion(falseType, trueType, tc.n.FalseExpr)
434 if cn == nil {
435 return nil, fmt.Errorf(
436 "true and false expression types must match; have %s and %s",
437 trueType.Printable(), falseType.Printable(),
438 )
439 }
440 tc.n.FalseExpr = cn
441 falseType = trueType
442 }
443 }
444
445 // Currently list and map types cannot be used, because we cannot
446 // generally assert that their element types are consistent.
447 // Such support might be added later, either by improving the type
448 // system or restricting usage to only variable and literal expressions,
449 // but for now this is simply prohibited because it doesn't seem to
450 // be a common enough case to be worth the complexity.
451 switch trueType {
452 case ast.TypeList:
453 return nil, fmt.Errorf(
454 "conditional operator cannot be used with list values",
455 )
456 case ast.TypeMap:
457 return nil, fmt.Errorf(
458 "conditional operator cannot be used with map values",
459 )
460 }
461
462 // Result type (guaranteed to also match falseType due to the above)
463 if trueType == ast.TypeUnknown {
464 // falseType may also be unknown, but that's okay because two
465 // unknowns means our result is unknown anyway.
466 v.StackPush(falseType)
467 } else {
468 v.StackPush(trueType)
469 }
470
471 return tc.n, nil
472}
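// A sketch of how the string coercion above plays out, assuming the
// builtin implicit-conversion table that internalEval registers in
// eval.go (e.g. __builtin_StringToInt):
//
//	true ? "1" : 2          // trueType is string, falseType is int, so
//	                        // the true expression is wrapped in a
//	                        // string-to-int call and the whole
//	                        // conditional yields an int.
//	cond ? list_a : list_b  // rejected by the switch below.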
473
474type typeCheckOutput struct {
475 n *ast.Output
476}
477
478func (tc *typeCheckOutput) TypeCheck(v *TypeCheck) (ast.Node, error) {
479 n := tc.n
480 types := make([]ast.Type, len(n.Exprs))
481	for i := range n.Exprs {
482 types[len(n.Exprs)-1-i] = v.StackPop()
483 }
484
485 for _, ty := range types {
486 if ty == ast.TypeUnknown {
487 v.StackPush(ast.TypeUnknown)
488 return tc.n, nil
489 }
490 }
491
492	// If there is only one argument and it is a list or a map, we evaluate to that type
493 if len(types) == 1 {
494 switch t := types[0]; t {
495 case ast.TypeList:
496 fallthrough
497 case ast.TypeMap:
498 v.StackPush(t)
499 return n, nil
500 }
501 }
502
503 // Otherwise, all concat args must be strings, so validate that
504 resultType := ast.TypeString
505 for i, t := range types {
506
507 if t == ast.TypeUnknown {
508 resultType = ast.TypeUnknown
509 continue
510 }
511
512 if t != ast.TypeString {
513 cn := v.ImplicitConversion(t, ast.TypeString, n.Exprs[i])
514 if cn != nil {
515 n.Exprs[i] = cn
516 continue
517 }
518
519 return nil, fmt.Errorf(
520 "output of an HIL expression must be a string, or a single list (argument %d is %s)", i+1, t)
521 }
522 }
523
524 // This always results in type string, unless there are unknowns
525 v.StackPush(resultType)
526
527 return n, nil
528}
529
530type typeCheckLiteral struct {
531 n *ast.LiteralNode
532}
533
534func (tc *typeCheckLiteral) TypeCheck(v *TypeCheck) (ast.Node, error) {
535 v.StackPush(tc.n.Typex)
536 return tc.n, nil
537}
538
539type typeCheckVariableAccess struct {
540 n *ast.VariableAccess
541}
542
543func (tc *typeCheckVariableAccess) TypeCheck(v *TypeCheck) (ast.Node, error) {
544 // Look up the variable in the map
545 variable, ok := v.Scope.LookupVar(tc.n.Name)
546 if !ok {
547 return nil, fmt.Errorf(
548 "unknown variable accessed: %s", tc.n.Name)
549 }
550
551 // Add the type to the stack
552 v.StackPush(variable.Type)
553
554 return tc.n, nil
555}
556
557type typeCheckIndex struct {
558 n *ast.Index
559}
560
561func (tc *typeCheckIndex) TypeCheck(v *TypeCheck) (ast.Node, error) {
562 keyType := v.StackPop()
563 targetType := v.StackPop()
564
565 if keyType == ast.TypeUnknown || targetType == ast.TypeUnknown {
566 v.StackPush(ast.TypeUnknown)
567 return tc.n, nil
568 }
569
570 // Ensure we have a VariableAccess as the target
571 varAccessNode, ok := tc.n.Target.(*ast.VariableAccess)
572 if !ok {
573 return nil, fmt.Errorf(
574 "target of an index must be a VariableAccess node, was %T", tc.n.Target)
575 }
576
577 // Get the variable
578 variable, ok := v.Scope.LookupVar(varAccessNode.Name)
579 if !ok {
580 return nil, fmt.Errorf(
581 "unknown variable accessed: %s", varAccessNode.Name)
582 }
583
584 switch targetType {
585 case ast.TypeList:
586 if keyType != ast.TypeInt {
587 tc.n.Key = v.ImplicitConversion(keyType, ast.TypeInt, tc.n.Key)
588 if tc.n.Key == nil {
589 return nil, fmt.Errorf(
590 "key of an index must be an int, was %s", keyType)
591 }
592 }
593
594 valType, err := ast.VariableListElementTypesAreHomogenous(
595 varAccessNode.Name, variable.Value.([]ast.Variable))
596 if err != nil {
597 return tc.n, err
598 }
599
600 v.StackPush(valType)
601 return tc.n, nil
602 case ast.TypeMap:
603 if keyType != ast.TypeString {
604 tc.n.Key = v.ImplicitConversion(keyType, ast.TypeString, tc.n.Key)
605 if tc.n.Key == nil {
606 return nil, fmt.Errorf(
607 "key of an index must be a string, was %s", keyType)
608 }
609 }
610
611 valType, err := ast.VariableMapValueTypesAreHomogenous(
612 varAccessNode.Name, variable.Value.(map[string]ast.Variable))
613 if err != nil {
614 return tc.n, err
615 }
616
617 v.StackPush(valType)
618 return tc.n, nil
619 default:
620 return nil, fmt.Errorf("invalid index operation into non-indexable type: %s", variable.Type)
621 }
622}
623
624func (v *TypeCheck) ImplicitConversion(
625 actual ast.Type, expected ast.Type, n ast.Node) ast.Node {
626 if v.Implicit == nil {
627 return nil
628 }
629
630 fromMap, ok := v.Implicit[actual]
631 if !ok {
632 return nil
633 }
634
635 toFunc, ok := fromMap[expected]
636 if !ok {
637 return nil
638 }
639
640 return &ast.Call{
641 Func: toFunc,
642 Args: []ast.Node{n},
643 Posx: n.Pos(),
644 }
645}
646
647func (v *TypeCheck) reset() {
648 v.Stack = nil
649 v.err = nil
650}
651
652func (v *TypeCheck) StackPush(t ast.Type) {
653 v.Stack = append(v.Stack, t)
654}
655
656func (v *TypeCheck) StackPop() ast.Type {
657 var x ast.Type
658 x, v.Stack = v.Stack[len(v.Stack)-1], v.Stack[:len(v.Stack)-1]
659 return x
660}
661
662func (v *TypeCheck) StackPeek() ast.Type {
663 if len(v.Stack) == 0 {
664 return ast.TypeInvalid
665 }
666
667 return v.Stack[len(v.Stack)-1]
668}
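// A sketch of what ImplicitConversion above produces, assuming the
// builtin conversion table wired up by internalEval in eval.go (for
// example __builtin_IntToString): an int-typed node in a position that
// expects a string is wrapped in a call to the registered conversion
// function, so conceptually
//
//	&ast.LiteralNode{Value: 5, Typex: ast.TypeInt}
//
// becomes
//
//	&ast.Call{Func: "__builtin_IntToString", Args: []ast.Node{lit}}
//
// and the type checker treats that argument as TypeString from then on.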
diff --git a/vendor/github.com/hashicorp/hil/convert.go b/vendor/github.com/hashicorp/hil/convert.go
new file mode 100644
index 0000000..f2024d0
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/convert.go
@@ -0,0 +1,159 @@
1package hil
2
3import (
4 "fmt"
5 "reflect"
6
7 "github.com/hashicorp/hil/ast"
8 "github.com/mitchellh/mapstructure"
9)
10
11// UnknownValue is a sentinel value that can be used to denote
12// that a value of a variable (or map element, list element, etc.)
13// is unknown. This will always have the type ast.TypeUnknown.
14const UnknownValue = "74D93920-ED26-11E3-AC10-0800200C9A66"
15
16var hilMapstructureDecodeHookSlice []interface{}
17var hilMapstructureDecodeHookStringSlice []string
18var hilMapstructureDecodeHookMap map[string]interface{}
19
20// hilMapstructureWeakDecode behaves in the same way as mapstructure.WeakDecode
21// but adds a DecodeHook that defeats mapstructure's backward-compatibility
22// mode, which WeakDecodes []interface{}{} into an empty map[string]interface{}.
23// This keeps WeakDecode's flexibility without empty lists being silently converted into maps.
24func hilMapstructureWeakDecode(m interface{}, rawVal interface{}) error {
25 config := &mapstructure.DecoderConfig{
26 DecodeHook: func(source reflect.Type, target reflect.Type, val interface{}) (interface{}, error) {
27 sliceType := reflect.TypeOf(hilMapstructureDecodeHookSlice)
28 stringSliceType := reflect.TypeOf(hilMapstructureDecodeHookStringSlice)
29 mapType := reflect.TypeOf(hilMapstructureDecodeHookMap)
30
31 if (source == sliceType || source == stringSliceType) && target == mapType {
32 return nil, fmt.Errorf("Cannot convert %s into a %s", source, target)
33 }
34
35 return val, nil
36 },
37 WeaklyTypedInput: true,
38 Result: rawVal,
39 }
40
41 decoder, err := mapstructure.NewDecoder(config)
42 if err != nil {
43 return err
44 }
45
46 return decoder.Decode(m)
47}
48
49func InterfaceToVariable(input interface{}) (ast.Variable, error) {
50 if inputVariable, ok := input.(ast.Variable); ok {
51 return inputVariable, nil
52 }
53
54 var stringVal string
55 if err := hilMapstructureWeakDecode(input, &stringVal); err == nil {
56 // Special case the unknown value to turn into "unknown"
57 if stringVal == UnknownValue {
58 return ast.Variable{Value: UnknownValue, Type: ast.TypeUnknown}, nil
59 }
60
61 // Otherwise return the string value
62 return ast.Variable{
63 Type: ast.TypeString,
64 Value: stringVal,
65 }, nil
66 }
67
68 var mapVal map[string]interface{}
69 if err := hilMapstructureWeakDecode(input, &mapVal); err == nil {
70 elements := make(map[string]ast.Variable)
71 for i, element := range mapVal {
72 varElement, err := InterfaceToVariable(element)
73 if err != nil {
74 return ast.Variable{}, err
75 }
76 elements[i] = varElement
77 }
78
79 return ast.Variable{
80 Type: ast.TypeMap,
81 Value: elements,
82 }, nil
83 }
84
85 var sliceVal []interface{}
86 if err := hilMapstructureWeakDecode(input, &sliceVal); err == nil {
87 elements := make([]ast.Variable, len(sliceVal))
88 for i, element := range sliceVal {
89 varElement, err := InterfaceToVariable(element)
90 if err != nil {
91 return ast.Variable{}, err
92 }
93 elements[i] = varElement
94 }
95
96 return ast.Variable{
97 Type: ast.TypeList,
98 Value: elements,
99 }, nil
100 }
101
102	return ast.Variable{}, fmt.Errorf("value for conversion must be a string, []interface{}, or map[string]interface{}: got %T", input)
103}
104
105func VariableToInterface(input ast.Variable) (interface{}, error) {
106 if input.Type == ast.TypeString {
107 if inputStr, ok := input.Value.(string); ok {
108 return inputStr, nil
109 } else {
110 return nil, fmt.Errorf("ast.Variable with type string has value which is not a string")
111 }
112 }
113
114 if input.Type == ast.TypeList {
115 inputList, ok := input.Value.([]ast.Variable)
116 if !ok {
117 return nil, fmt.Errorf("ast.Variable with type list has value which is not a []ast.Variable")
118 }
119
120 result := make([]interface{}, 0)
121 if len(inputList) == 0 {
122 return result, nil
123 }
124
125 for _, element := range inputList {
126 if convertedElement, err := VariableToInterface(element); err == nil {
127 result = append(result, convertedElement)
128 } else {
129 return nil, err
130 }
131 }
132
133 return result, nil
134 }
135
136 if input.Type == ast.TypeMap {
137 inputMap, ok := input.Value.(map[string]ast.Variable)
138 if !ok {
139 return nil, fmt.Errorf("ast.Variable with type map has value which is not a map[string]ast.Variable")
140 }
141
142		result := make(map[string]interface{})
143 if len(inputMap) == 0 {
144 return result, nil
145 }
146
147 for key, value := range inputMap {
148 if convertedValue, err := VariableToInterface(value); err == nil {
149 result[key] = convertedValue
150 } else {
151 return nil, err
152 }
153 }
154
155 return result, nil
156 }
157
158 return nil, fmt.Errorf("unknown input type: %s", input.Type)
159}
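// A minimal usage sketch for the two conversions above, as seen from a
// caller of this package; the result shapes follow directly from the
// code in this file:
//
//	v, _ := hil.InterfaceToVariable([]interface{}{"a", "b"})
//	// v.Type == ast.TypeList; v.Value is []ast.Variable of TypeString
//
//	raw, _ := hil.VariableToInterface(v)
//	// raw is []interface{}{"a", "b"} again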
diff --git a/vendor/github.com/hashicorp/hil/eval.go b/vendor/github.com/hashicorp/hil/eval.go
new file mode 100644
index 0000000..2782076
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/eval.go
@@ -0,0 +1,472 @@
1package hil
2
3import (
4 "bytes"
5 "errors"
6 "fmt"
7 "sync"
8
9 "github.com/hashicorp/hil/ast"
10)
11
12// EvalConfig is the configuration for evaluating.
13type EvalConfig struct {
14 // GlobalScope is the global scope of execution for evaluation.
15 GlobalScope *ast.BasicScope
16
17 // SemanticChecks is a list of additional semantic checks that will be run
18 // on the tree prior to evaluating it. The type checker, identifier checker,
19 // etc. will be run before these automatically.
20 SemanticChecks []SemanticChecker
21}
22
23// SemanticChecker is the type that must be implemented to do a
24// semantic check on an AST tree. This will be called with the root node.
25type SemanticChecker func(ast.Node) error
26
27// EvaluationResult is a struct returned from the hil.Eval function,
28// representing the result of an interpolation. Results are returned in their
29// "natural" Go structure rather than in terms of the HIL AST. For the types
30// currently implemented, this means that the Value field can be interpreted as
31// the following Go types:
32// TypeInvalid: undefined
33// TypeString: string
34// TypeList: []interface{}
35// TypeMap: map[string]interface{}
36// TypeBool: bool
37type EvaluationResult struct {
38 Type EvalType
39 Value interface{}
40}
41
42// InvalidResult is a structure representing the result of a HIL interpolation
43// which has invalid syntax, missing variables, or some other type of error.
44// The error is described out of band in the accompanying error return value.
45var InvalidResult = EvaluationResult{Type: TypeInvalid, Value: nil}
46
47// errExitUnknown is an internal error that when returned means the result
48// is an unknown value. We use this for early exit.
49var errExitUnknown = errors.New("unknown value")
50
51func Eval(root ast.Node, config *EvalConfig) (EvaluationResult, error) {
52 output, outputType, err := internalEval(root, config)
53 if err != nil {
54 return InvalidResult, err
55 }
56
57 // If the result contains any nested unknowns then the result as a whole
58 // is unknown, so that callers only have to deal with "entirely known"
59 // or "entirely unknown" as outcomes.
60 if ast.IsUnknown(ast.Variable{Type: outputType, Value: output}) {
61 outputType = ast.TypeUnknown
62 output = UnknownValue
63 }
64
65 switch outputType {
66 case ast.TypeList:
67 val, err := VariableToInterface(ast.Variable{
68 Type: ast.TypeList,
69 Value: output,
70 })
71 return EvaluationResult{
72 Type: TypeList,
73 Value: val,
74 }, err
75 case ast.TypeMap:
76 val, err := VariableToInterface(ast.Variable{
77 Type: ast.TypeMap,
78 Value: output,
79 })
80 return EvaluationResult{
81 Type: TypeMap,
82 Value: val,
83 }, err
84 case ast.TypeString:
85 return EvaluationResult{
86 Type: TypeString,
87 Value: output,
88 }, nil
89 case ast.TypeBool:
90 return EvaluationResult{
91 Type: TypeBool,
92 Value: output,
93 }, nil
94 case ast.TypeUnknown:
95 return EvaluationResult{
96 Type: TypeUnknown,
97 Value: UnknownValue,
98 }, nil
99 default:
100 return InvalidResult, fmt.Errorf("unknown type %s as interpolation output", outputType)
101 }
102}
103
104// internalEval evaluates the given AST tree and returns its output value,
105// the type of the output, and any error that occurred.
106func internalEval(root ast.Node, config *EvalConfig) (interface{}, ast.Type, error) {
107 // Copy the scope so we can add our builtins
108 if config == nil {
109 config = new(EvalConfig)
110 }
111 scope := registerBuiltins(config.GlobalScope)
112 implicitMap := map[ast.Type]map[ast.Type]string{
113 ast.TypeFloat: {
114 ast.TypeInt: "__builtin_FloatToInt",
115 ast.TypeString: "__builtin_FloatToString",
116 },
117 ast.TypeInt: {
118 ast.TypeFloat: "__builtin_IntToFloat",
119 ast.TypeString: "__builtin_IntToString",
120 },
121 ast.TypeString: {
122 ast.TypeInt: "__builtin_StringToInt",
123 ast.TypeFloat: "__builtin_StringToFloat",
124 ast.TypeBool: "__builtin_StringToBool",
125 },
126 ast.TypeBool: {
127 ast.TypeString: "__builtin_BoolToString",
128 },
129 }
130
131 // Build our own semantic checks that we always run
132 tv := &TypeCheck{Scope: scope, Implicit: implicitMap}
133 ic := &IdentifierCheck{Scope: scope}
134
135 // Build up the semantic checks for execution
136 checks := make(
137 []SemanticChecker,
138 len(config.SemanticChecks),
139 len(config.SemanticChecks)+2)
140 copy(checks, config.SemanticChecks)
141 checks = append(checks, ic.Visit)
142 checks = append(checks, tv.Visit)
143
144 // Run the semantic checks
145 for _, check := range checks {
146 if err := check(root); err != nil {
147 return nil, ast.TypeInvalid, err
148 }
149 }
150
151 // Execute
152 v := &evalVisitor{Scope: scope}
153 return v.Visit(root)
154}
155
156// EvalNode is the interface that must be implemented by any ast.Node
157// to support evaluation. This will be called in visitor pattern order.
158// The result of each call to Eval is automatically pushed onto the
159// stack as a LiteralNode. Pop elements off the stack to get child
160// values.
161type EvalNode interface {
162 Eval(ast.Scope, *ast.Stack) (interface{}, ast.Type, error)
163}
164
165type evalVisitor struct {
166 Scope ast.Scope
167 Stack ast.Stack
168
169 err error
170 lock sync.Mutex
171}
172
173func (v *evalVisitor) Visit(root ast.Node) (interface{}, ast.Type, error) {
174 // Run the actual visitor pattern
175 root.Accept(v.visit)
176
177 // Get our result and clear out everything else
178 var result *ast.LiteralNode
179 if v.Stack.Len() > 0 {
180 result = v.Stack.Pop().(*ast.LiteralNode)
181 } else {
182 result = new(ast.LiteralNode)
183 }
184 resultErr := v.err
185 if resultErr == errExitUnknown {
186 // This means the return value is unknown and we used the error
187 // as an early exit mechanism. Reset since the value on the stack
188 // should be the unknown value.
189 resultErr = nil
190 }
191
192 // Clear everything else so we aren't just dangling
193 v.Stack.Reset()
194 v.err = nil
195
196 t, err := result.Type(v.Scope)
197 if err != nil {
198 return nil, ast.TypeInvalid, err
199 }
200
201 return result.Value, t, resultErr
202}
203
204func (v *evalVisitor) visit(raw ast.Node) ast.Node {
205 if v.err != nil {
206 return raw
207 }
208
209 en, err := evalNode(raw)
210 if err != nil {
211 v.err = err
212 return raw
213 }
214
215 out, outType, err := en.Eval(v.Scope, &v.Stack)
216 if err != nil {
217 v.err = err
218 return raw
219 }
220
221 v.Stack.Push(&ast.LiteralNode{
222 Value: out,
223 Typex: outType,
224 })
225
226 if outType == ast.TypeUnknown {
227 // Halt immediately
228 v.err = errExitUnknown
229 return raw
230 }
231
232 return raw
233}
234
235// evalNode is a private function that returns an EvalNode for built-in
236// types as well as any other EvalNode implementations.
237func evalNode(raw ast.Node) (EvalNode, error) {
238 switch n := raw.(type) {
239 case *ast.Index:
240 return &evalIndex{n}, nil
241 case *ast.Call:
242 return &evalCall{n}, nil
243 case *ast.Conditional:
244 return &evalConditional{n}, nil
245 case *ast.Output:
246 return &evalOutput{n}, nil
247 case *ast.LiteralNode:
248 return &evalLiteralNode{n}, nil
249 case *ast.VariableAccess:
250 return &evalVariableAccess{n}, nil
251 default:
252 en, ok := n.(EvalNode)
253 if !ok {
254 return nil, fmt.Errorf("node doesn't support evaluation: %#v", raw)
255 }
256
257 return en, nil
258 }
259}
260
261type evalCall struct{ *ast.Call }
262
263func (v *evalCall) Eval(s ast.Scope, stack *ast.Stack) (interface{}, ast.Type, error) {
264 // Look up the function in the map
265 function, ok := s.LookupFunc(v.Func)
266 if !ok {
267 return nil, ast.TypeInvalid, fmt.Errorf(
268 "unknown function called: %s", v.Func)
269 }
270
271 // The arguments are on the stack in reverse order, so pop them off.
272 args := make([]interface{}, len(v.Args))
273	for i := range v.Args {
274 node := stack.Pop().(*ast.LiteralNode)
275 if node.IsUnknown() {
276 // If any arguments are unknown then the result is automatically unknown
277 return UnknownValue, ast.TypeUnknown, nil
278 }
279 args[len(v.Args)-1-i] = node.Value
280 }
281
282 // Call the function
283 result, err := function.Callback(args)
284 if err != nil {
285 return nil, ast.TypeInvalid, fmt.Errorf("%s: %s", v.Func, err)
286 }
287
288 return result, function.ReturnType, nil
289}
290
291type evalConditional struct{ *ast.Conditional }
292
293func (v *evalConditional) Eval(s ast.Scope, stack *ast.Stack) (interface{}, ast.Type, error) {
294 // On the stack we have literal nodes representing the resulting values
295 // of the condition, true and false expressions, but they are in reverse
296 // order.
297 falseLit := stack.Pop().(*ast.LiteralNode)
298 trueLit := stack.Pop().(*ast.LiteralNode)
299 condLit := stack.Pop().(*ast.LiteralNode)
300
301 if condLit.IsUnknown() {
302 // If our conditional is unknown then our result is also unknown
303 return UnknownValue, ast.TypeUnknown, nil
304 }
305
306 if condLit.Value.(bool) {
307 return trueLit.Value, trueLit.Typex, nil
308 } else {
309		return falseLit.Value, falseLit.Typex, nil
310 }
311}
312
313type evalIndex struct{ *ast.Index }
314
315func (v *evalIndex) Eval(scope ast.Scope, stack *ast.Stack) (interface{}, ast.Type, error) {
316 key := stack.Pop().(*ast.LiteralNode)
317 target := stack.Pop().(*ast.LiteralNode)
318
319 variableName := v.Index.Target.(*ast.VariableAccess).Name
320
321 if key.IsUnknown() {
322 // If our key is unknown then our result is also unknown
323 return UnknownValue, ast.TypeUnknown, nil
324 }
325
326 // For target, we'll accept collections containing unknown values but
327 // we still need to catch when the collection itself is unknown, shallowly.
328 if target.Typex == ast.TypeUnknown {
329 return UnknownValue, ast.TypeUnknown, nil
330 }
331
332 switch target.Typex {
333 case ast.TypeList:
334 return v.evalListIndex(variableName, target.Value, key.Value)
335 case ast.TypeMap:
336 return v.evalMapIndex(variableName, target.Value, key.Value)
337 default:
338 return nil, ast.TypeInvalid, fmt.Errorf(
339 "target %q for indexing must be ast.TypeList or ast.TypeMap, is %s",
340 variableName, target.Typex)
341 }
342}
343
344func (v *evalIndex) evalListIndex(variableName string, target interface{}, key interface{}) (interface{}, ast.Type, error) {
345 // We assume type checking was already done and we can assume that target
346 // is a list and key is an int
347 list, ok := target.([]ast.Variable)
348 if !ok {
349 return nil, ast.TypeInvalid, fmt.Errorf(
350 "cannot cast target to []Variable, is: %T", target)
351 }
352
353 keyInt, ok := key.(int)
354 if !ok {
355 return nil, ast.TypeInvalid, fmt.Errorf(
356 "cannot cast key to int, is: %T", key)
357 }
358
359 if len(list) == 0 {
360 return nil, ast.TypeInvalid, fmt.Errorf("list is empty")
361 }
362
363 if keyInt < 0 || len(list) < keyInt+1 {
364 return nil, ast.TypeInvalid, fmt.Errorf(
365 "index %d out of range for list %s (max %d)",
366			keyInt, variableName, len(list)-1)
367 }
368
369 returnVal := list[keyInt].Value
370 returnType := list[keyInt].Type
371 return returnVal, returnType, nil
372}
373
374func (v *evalIndex) evalMapIndex(variableName string, target interface{}, key interface{}) (interface{}, ast.Type, error) {
375 // We assume type checking was already done and we can assume that target
376 // is a map and key is a string
377 vmap, ok := target.(map[string]ast.Variable)
378 if !ok {
379 return nil, ast.TypeInvalid, fmt.Errorf(
380 "cannot cast target to map[string]Variable, is: %T", target)
381 }
382
383 keyString, ok := key.(string)
384 if !ok {
385 return nil, ast.TypeInvalid, fmt.Errorf(
386 "cannot cast key to string, is: %T", key)
387 }
388
389 if len(vmap) == 0 {
390 return nil, ast.TypeInvalid, fmt.Errorf("map is empty")
391 }
392
393 value, ok := vmap[keyString]
394 if !ok {
395 return nil, ast.TypeInvalid, fmt.Errorf(
396 "key %q does not exist in map %s", keyString, variableName)
397 }
398
399 return value.Value, value.Type, nil
400}
401
402type evalOutput struct{ *ast.Output }
403
404func (v *evalOutput) Eval(s ast.Scope, stack *ast.Stack) (interface{}, ast.Type, error) {
405 // The expressions should all be on the stack in reverse
406 // order. So pop them off, reverse their order, and concatenate.
407 nodes := make([]*ast.LiteralNode, 0, len(v.Exprs))
408 haveUnknown := false
409 for range v.Exprs {
410 n := stack.Pop().(*ast.LiteralNode)
411 nodes = append(nodes, n)
412
413 // If we have any unknowns then the whole result is unknown
414 // (we must deal with this first, because the type checker can
415 // skip type conversions in the presence of unknowns, and thus
416 // any of our other nodes may be incorrectly typed.)
417 if n.IsUnknown() {
418 haveUnknown = true
419 }
420 }
421
422 if haveUnknown {
423 return UnknownValue, ast.TypeUnknown, nil
424 }
425
426	// Special case: a single list, map, or unknown result passes through as-is
427 if len(nodes) == 1 {
428 switch t := nodes[0].Typex; t {
429 case ast.TypeList:
430 fallthrough
431 case ast.TypeMap:
432 fallthrough
433 case ast.TypeUnknown:
434 return nodes[0].Value, t, nil
435 }
436 }
437
438 // Otherwise concatenate the strings
439 var buf bytes.Buffer
440 for i := len(nodes) - 1; i >= 0; i-- {
441 if nodes[i].Typex != ast.TypeString {
442 return nil, ast.TypeInvalid, fmt.Errorf(
443 "invalid output with %s value at index %d: %#v",
444 nodes[i].Typex,
445 i,
446 nodes[i].Value,
447 )
448 }
449 buf.WriteString(nodes[i].Value.(string))
450 }
451
452 return buf.String(), ast.TypeString, nil
453}
454
455type evalLiteralNode struct{ *ast.LiteralNode }
456
457func (v *evalLiteralNode) Eval(ast.Scope, *ast.Stack) (interface{}, ast.Type, error) {
458 return v.Value, v.Typex, nil
459}
460
461type evalVariableAccess struct{ *ast.VariableAccess }
462
463func (v *evalVariableAccess) Eval(scope ast.Scope, _ *ast.Stack) (interface{}, ast.Type, error) {
464 // Look up the variable in the map
465 variable, ok := scope.LookupVar(v.Name)
466 if !ok {
467 return nil, ast.TypeInvalid, fmt.Errorf(
468 "unknown variable accessed: %s", v.Name)
469 }
470
471 return variable.Value, variable.Type, nil
472}
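// A self-contained sketch of driving Parse and Eval end to end. The
// VarMap field on ast.BasicScope is assumed from the hil/ast package;
// everything else appears in this diff.
//
//	package main
//
//	import (
//		"fmt"
//
//		"github.com/hashicorp/hil"
//		"github.com/hashicorp/hil/ast"
//	)
//
//	func main() {
//		tree, err := hil.Parse("hello ${var.name}!")
//		if err != nil {
//			panic(err)
//		}
//
//		config := &hil.EvalConfig{
//			GlobalScope: &ast.BasicScope{
//				VarMap: map[string]ast.Variable{
//					"var.name": {Type: ast.TypeString, Value: "world"},
//				},
//			},
//		}
//
//		result, err := hil.Eval(tree, config)
//		if err != nil {
//			panic(err)
//		}
//		fmt.Println(result.Type, result.Value) // TypeString hello world!
//	}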
diff --git a/vendor/github.com/hashicorp/hil/eval_type.go b/vendor/github.com/hashicorp/hil/eval_type.go
new file mode 100644
index 0000000..6946ecd
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/eval_type.go
@@ -0,0 +1,16 @@
1package hil
2
3//go:generate stringer -type=EvalType eval_type.go
4
5// EvalType represents the type of the output returned from a HIL
6// evaluation.
7type EvalType uint32
8
9const (
10 TypeInvalid EvalType = 0
11 TypeString EvalType = 1 << iota
12 TypeBool
13 TypeList
14 TypeMap
15 TypeUnknown
16)
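// The 1 << iota above begins at iota == 1 on the TypeString line, so the
// values are distinct bit flags:
//
//	TypeInvalid = 0
//	TypeString  = 2  (1 << 1)
//	TypeBool    = 4  (1 << 2)
//	TypeList    = 8  (1 << 3)
//	TypeMap     = 16 (1 << 4)
//	TypeUnknown = 32 (1 << 5)
//
// These match the cases in the generated evaltype_string.go below.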
diff --git a/vendor/github.com/hashicorp/hil/evaltype_string.go b/vendor/github.com/hashicorp/hil/evaltype_string.go
new file mode 100644
index 0000000..b107ddd
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/evaltype_string.go
@@ -0,0 +1,42 @@
1// Code generated by "stringer -type=EvalType eval_type.go"; DO NOT EDIT
2
3package hil
4
5import "fmt"
6
7const (
8 _EvalType_name_0 = "TypeInvalid"
9 _EvalType_name_1 = "TypeString"
10 _EvalType_name_2 = "TypeBool"
11 _EvalType_name_3 = "TypeList"
12 _EvalType_name_4 = "TypeMap"
13 _EvalType_name_5 = "TypeUnknown"
14)
15
16var (
17 _EvalType_index_0 = [...]uint8{0, 11}
18 _EvalType_index_1 = [...]uint8{0, 10}
19 _EvalType_index_2 = [...]uint8{0, 8}
20 _EvalType_index_3 = [...]uint8{0, 8}
21 _EvalType_index_4 = [...]uint8{0, 7}
22 _EvalType_index_5 = [...]uint8{0, 11}
23)
24
25func (i EvalType) String() string {
26 switch {
27 case i == 0:
28 return _EvalType_name_0
29 case i == 2:
30 return _EvalType_name_1
31 case i == 4:
32 return _EvalType_name_2
33 case i == 8:
34 return _EvalType_name_3
35 case i == 16:
36 return _EvalType_name_4
37 case i == 32:
38 return _EvalType_name_5
39 default:
40 return fmt.Sprintf("EvalType(%d)", i)
41 }
42}
diff --git a/vendor/github.com/hashicorp/hil/parse.go b/vendor/github.com/hashicorp/hil/parse.go
new file mode 100644
index 0000000..ecbe1fd
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/parse.go
@@ -0,0 +1,29 @@
1package hil
2
3import (
4 "github.com/hashicorp/hil/ast"
5 "github.com/hashicorp/hil/parser"
6 "github.com/hashicorp/hil/scanner"
7)
8
9// Parse parses the given program and returns an executable AST tree.
10//
11// Syntax errors are returned as an error whose dynamic type is
12// *parser.ParseError, which gives the caller access to the source position
13// where the error was found, allowing (for example) a known source filename
14// to be combined with it to add context to the error message.
15func Parse(v string) (ast.Node, error) {
16 return ParseWithPosition(v, ast.Pos{Line: 1, Column: 1})
17}
18
19// ParseWithPosition is like Parse except that it overrides the source
20// row and column position of the first character in the string, which should
21// be 1-based.
22//
23// This can be used when HIL is embedded in another language and the outer
24// parser knows the row and column where the HIL expression started within
25// the overall source file.
26func ParseWithPosition(v string, pos ast.Pos) (ast.Node, error) {
27 ch := scanner.Scan(v, pos)
28 return parser.Parse(ch)
29}
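// A sketch of consuming the *parser.ParseError described above; src and
// filename are hypothetical, while Pos and Message are defined in
// parser/error.go later in this diff:
//
//	node, err := hil.Parse(src)
//	if perr, ok := err.(*parser.ParseError); ok {
//		fmt.Printf("%s:%d:%d: %s\n",
//			filename, perr.Pos.Line, perr.Pos.Column, perr.Message)
//	}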
diff --git a/vendor/github.com/hashicorp/hil/parser/binary_op.go b/vendor/github.com/hashicorp/hil/parser/binary_op.go
new file mode 100644
index 0000000..2e013e0
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/parser/binary_op.go
@@ -0,0 +1,45 @@
1package parser
2
3import (
4 "github.com/hashicorp/hil/ast"
5 "github.com/hashicorp/hil/scanner"
6)
7
8var binaryOps []map[scanner.TokenType]ast.ArithmeticOp
9
10func init() {
11 // This operation table maps from the operator's scanner token type
12 // to the AST arithmetic operation. All expressions produced from
13 // binary operators are *ast.Arithmetic nodes.
14 //
15 // Binary operator groups are listed in order of precedence, with
16 // the *lowest* precedence first. Operators within the same group
17 // have left-to-right associativity.
18 binaryOps = []map[scanner.TokenType]ast.ArithmeticOp{
19 {
20 scanner.OR: ast.ArithmeticOpLogicalOr,
21 },
22 {
23 scanner.AND: ast.ArithmeticOpLogicalAnd,
24 },
25 {
26 scanner.EQUAL: ast.ArithmeticOpEqual,
27 scanner.NOTEQUAL: ast.ArithmeticOpNotEqual,
28 },
29 {
30 scanner.GT: ast.ArithmeticOpGreaterThan,
31 scanner.GTE: ast.ArithmeticOpGreaterThanOrEqual,
32 scanner.LT: ast.ArithmeticOpLessThan,
33 scanner.LTE: ast.ArithmeticOpLessThanOrEqual,
34 },
35 {
36 scanner.PLUS: ast.ArithmeticOpAdd,
37 scanner.MINUS: ast.ArithmeticOpSub,
38 },
39 {
40 scanner.STAR: ast.ArithmeticOpMul,
41 scanner.SLASH: ast.ArithmeticOpDiv,
42 scanner.PERCENT: ast.ArithmeticOpMod,
43 },
44 }
45}
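// A worked example of the ordering above: multiplication sits in a later
// (higher-precedence) group than addition, so "${1 + 2 * 3}" parses,
// conceptually, as
//
//	&ast.Arithmetic{Op: ast.ArithmeticOpAdd, Exprs: [1, &ast.Arithmetic{
//		Op: ast.ArithmeticOpMul, Exprs: [2, 3]}]}
//
// i.e. 1 + (2 * 3), while operators in the same group associate left to
// right, so "${1 - 2 - 3}" parses as (1 - 2) - 3.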
diff --git a/vendor/github.com/hashicorp/hil/parser/error.go b/vendor/github.com/hashicorp/hil/parser/error.go
new file mode 100644
index 0000000..bacd696
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/parser/error.go
@@ -0,0 +1,38 @@
1package parser
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/hil/ast"
7 "github.com/hashicorp/hil/scanner"
8)
9
10type ParseError struct {
11 Message string
12 Pos ast.Pos
13}
14
15func Errorf(pos ast.Pos, format string, args ...interface{}) error {
16 return &ParseError{
17 Message: fmt.Sprintf(format, args...),
18 Pos: pos,
19 }
20}
21
22// TokenErrorf is a convenient wrapper around Errorf that uses the
23// position of the given token.
24func TokenErrorf(token *scanner.Token, format string, args ...interface{}) error {
25 return Errorf(token.Pos, format, args...)
26}
27
28func ExpectationError(wanted string, got *scanner.Token) error {
29 return TokenErrorf(got, "expected %s but found %s", wanted, got)
30}
31
32func (e *ParseError) Error() string {
33 return fmt.Sprintf("parse error at %s: %s", e.Pos, e.Message)
34}
35
36func (e *ParseError) String() string {
37 return e.Error()
38}
diff --git a/vendor/github.com/hashicorp/hil/parser/fuzz.go b/vendor/github.com/hashicorp/hil/parser/fuzz.go
new file mode 100644
index 0000000..de954f3
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/parser/fuzz.go
@@ -0,0 +1,28 @@
1// +build gofuzz
2
3package parser
4
5import (
6 "github.com/hashicorp/hil/ast"
7 "github.com/hashicorp/hil/scanner"
8)
9
10// This is a fuzz testing function designed to be used with go-fuzz:
11// https://github.com/dvyukov/go-fuzz
12//
13// It's not included in a normal build due to the gofuzz build tag above.
14//
15// There are some input files that you can use as a seed corpus for go-fuzz
16// in the directory ./fuzz-corpus .
17
18func Fuzz(data []byte) int {
19 str := string(data)
20
21 ch := scanner.Scan(str, ast.Pos{Line: 1, Column: 1})
22 _, err := Parse(ch)
23 if err != nil {
24 return 0
25 }
26
27 return 1
28}
diff --git a/vendor/github.com/hashicorp/hil/parser/parser.go b/vendor/github.com/hashicorp/hil/parser/parser.go
new file mode 100644
index 0000000..376f1c4
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/parser/parser.go
@@ -0,0 +1,522 @@
1package parser
2
3import (
4 "strconv"
5 "unicode/utf8"
6
7 "github.com/hashicorp/hil/ast"
8 "github.com/hashicorp/hil/scanner"
9)
10
11func Parse(ch <-chan *scanner.Token) (ast.Node, error) {
12 peeker := scanner.NewPeeker(ch)
13 parser := &parser{peeker}
14 output, err := parser.ParseTopLevel()
15 peeker.Close()
16 return output, err
17}
18
19type parser struct {
20 peeker *scanner.Peeker
21}
22
23func (p *parser) ParseTopLevel() (ast.Node, error) {
24 return p.parseInterpolationSeq(false)
25}
26
27func (p *parser) ParseQuoted() (ast.Node, error) {
28 return p.parseInterpolationSeq(true)
29}
30
31// parseInterpolationSeq parses either the top-level sequence of literals
32// and interpolation expressions or a similar sequence within a quoted
33// string inside an interpolation expression. The latter case is requested
34// by setting 'quoted' to true.
35func (p *parser) parseInterpolationSeq(quoted bool) (ast.Node, error) {
36 literalType := scanner.LITERAL
37 endType := scanner.EOF
38 if quoted {
39 // exceptions for quoted sequences
40 literalType = scanner.STRING
41 endType = scanner.CQUOTE
42 }
43
44 startPos := p.peeker.Peek().Pos
45
46 if quoted {
47 tok := p.peeker.Read()
48 if tok.Type != scanner.OQUOTE {
49 return nil, ExpectationError("open quote", tok)
50 }
51 }
52
53 var exprs []ast.Node
54 for {
55 tok := p.peeker.Read()
56
57 if tok.Type == endType {
58 break
59 }
60
61 switch tok.Type {
62 case literalType:
63 val, err := p.parseStringToken(tok)
64 if err != nil {
65 return nil, err
66 }
67 exprs = append(exprs, &ast.LiteralNode{
68 Value: val,
69 Typex: ast.TypeString,
70 Posx: tok.Pos,
71 })
72 case scanner.BEGIN:
73 expr, err := p.ParseInterpolation()
74 if err != nil {
75 return nil, err
76 }
77 exprs = append(exprs, expr)
78 default:
79 return nil, ExpectationError(`"${"`, tok)
80 }
81 }
82
83 if len(exprs) == 0 {
84 // If we have no parts at all then the input must've
85 // been an empty string.
86 exprs = append(exprs, &ast.LiteralNode{
87 Value: "",
88 Typex: ast.TypeString,
89 Posx: startPos,
90 })
91 }
92
93 // As a special case, if our "Output" contains only one expression
94 // and it's a literal string then we'll hoist it up to be our
95 // direct return value, so callers can easily recognize a string
96 // that has no interpolations at all.
97 if len(exprs) == 1 {
98 if lit, ok := exprs[0].(*ast.LiteralNode); ok {
99 if lit.Typex == ast.TypeString {
100 return lit, nil
101 }
102 }
103 }
104
105 return &ast.Output{
106 Exprs: exprs,
107 Posx: startPos,
108 }, nil
109}
110
111// parseStringToken takes a token of either LITERAL or STRING type and
112// returns the interpreted string, after processing any relevant
113// escape sequences.
114func (p *parser) parseStringToken(tok *scanner.Token) (string, error) {
115 var backslashes bool
116 switch tok.Type {
117 case scanner.LITERAL:
118 backslashes = false
119 case scanner.STRING:
120 backslashes = true
121 default:
122 panic("unsupported string token type")
123 }
124
125 raw := []byte(tok.Content)
126 buf := make([]byte, 0, len(raw))
127
128 for i := 0; i < len(raw); i++ {
129 b := raw[i]
130 more := len(raw) > (i + 1)
131
132 if b == '$' {
133 if more && raw[i+1] == '$' {
134 // skip over the second dollar sign
135 i++
136 }
137 } else if backslashes && b == '\\' {
138 if !more {
139 return "", Errorf(
140 ast.Pos{
141 Column: tok.Pos.Column + utf8.RuneCount(raw[:i]),
142 Line: tok.Pos.Line,
143 },
144 `unfinished backslash escape sequence`,
145 )
146 }
147 escapeType := raw[i+1]
148 switch escapeType {
149 case '\\':
150 // skip over the second slash
151 i++
152 case 'n':
153 b = '\n'
154 i++
155 case '"':
156 b = '"'
157 i++
158 default:
159 return "", Errorf(
160 ast.Pos{
161 Column: tok.Pos.Column + utf8.RuneCount(raw[:i]),
162 Line: tok.Pos.Line,
163 },
164 `invalid backslash escape sequence`,
165 )
166 }
167 }
168
169 buf = append(buf, b)
170 }
171
172 return string(buf), nil
173}
174
175func (p *parser) ParseInterpolation() (ast.Node, error) {
176 // By the time we're called, we're already "inside" the ${ sequence
177 // because the caller consumed the ${ token.
178
179 expr, err := p.ParseExpression()
180 if err != nil {
181 return nil, err
182 }
183
184 err = p.requireTokenType(scanner.END, `"}"`)
185 if err != nil {
186 return nil, err
187 }
188
189 return expr, nil
190}
191
192func (p *parser) ParseExpression() (ast.Node, error) {
193 return p.parseTernaryCond()
194}
195
196func (p *parser) parseTernaryCond() (ast.Node, error) {
197 // The ternary condition operator (.. ? .. : ..) behaves somewhat
198 // like a binary operator except that the "operator" is itself
199 // an expression enclosed in two punctuation characters.
200 // The middle expression is parsed as if the ? and : symbols
201 // were parentheses. The "rhs" (the "false expression") is then
202 // treated right-associatively so it behaves similarly to the
203 // middle in terms of precedence.
204
205 startPos := p.peeker.Peek().Pos
206
207 var cond, trueExpr, falseExpr ast.Node
208 var err error
209
210 cond, err = p.parseBinaryOps(binaryOps)
211 if err != nil {
212 return nil, err
213 }
214
215 next := p.peeker.Peek()
216 if next.Type != scanner.QUESTION {
217 return cond, nil
218 }
219
220 p.peeker.Read() // eat question mark
221
222 trueExpr, err = p.ParseExpression()
223 if err != nil {
224 return nil, err
225 }
226
227 colon := p.peeker.Read()
228 if colon.Type != scanner.COLON {
229 return nil, ExpectationError(":", colon)
230 }
231
232 falseExpr, err = p.ParseExpression()
233 if err != nil {
234 return nil, err
235 }
236
237 return &ast.Conditional{
238 CondExpr: cond,
239 TrueExpr: trueExpr,
240 FalseExpr: falseExpr,
241 Posx: startPos,
242 }, nil
243}
244
245// parseBinaryOps calls itself recursively to work through all of the
246// operator precedence groups, and then eventually calls ParseExpressionTerm
247// for each operand.
248func (p *parser) parseBinaryOps(ops []map[scanner.TokenType]ast.ArithmeticOp) (ast.Node, error) {
249 if len(ops) == 0 {
250 // We've run out of operators, so now we'll just try to parse a term.
251 return p.ParseExpressionTerm()
252 }
253
254 thisLevel := ops[0]
255 remaining := ops[1:]
256
257 startPos := p.peeker.Peek().Pos
258
259 var lhs, rhs ast.Node
260 operator := ast.ArithmeticOpInvalid
261 var err error
262
263 // parse a term that might be the first operand of a binary
264 // expression or it might just be a standalone term, but
265 // we won't know until we've parsed it and can look ahead
266 // to see if there's an operator token.
267 lhs, err = p.parseBinaryOps(remaining)
268 if err != nil {
269 return nil, err
270 }
271
272 // We'll keep eating up arithmetic operators until we run
273 // out, so that operators with the same precedence will combine in a
274 // left-associative manner:
275 // a+b+c => (a+b)+c, not a+(b+c)
276 //
277 // Should we later want to have right-associative operators, a way
278 // to achieve that would be to call back up to ParseExpression here
279 // instead of iteratively parsing only the remaining operators.
280 for {
281 next := p.peeker.Peek()
282 var newOperator ast.ArithmeticOp
283 var ok bool
284 if newOperator, ok = thisLevel[next.Type]; !ok {
285 break
286 }
287
288 // Are we extending an expression started on
289 // the previous iteration?
290 if operator != ast.ArithmeticOpInvalid {
291 lhs = &ast.Arithmetic{
292 Op: operator,
293 Exprs: []ast.Node{lhs, rhs},
294 Posx: startPos,
295 }
296 }
297
298 operator = newOperator
299 p.peeker.Read() // eat operator token
300 rhs, err = p.parseBinaryOps(remaining)
301 if err != nil {
302 return nil, err
303 }
304 }
305
306 if operator != ast.ArithmeticOpInvalid {
307 return &ast.Arithmetic{
308 Op: operator,
309 Exprs: []ast.Node{lhs, rhs},
310 Posx: startPos,
311 }, nil
312 } else {
313 return lhs, nil
314 }
315}
316
317func (p *parser) ParseExpressionTerm() (ast.Node, error) {
318
319 next := p.peeker.Peek()
320
321 switch next.Type {
322
323 case scanner.OPAREN:
324 p.peeker.Read()
325 expr, err := p.ParseExpression()
326 if err != nil {
327 return nil, err
328 }
329 err = p.requireTokenType(scanner.CPAREN, `")"`)
330 return expr, err
331
332 case scanner.OQUOTE:
333 return p.ParseQuoted()
334
335 case scanner.INTEGER:
336 tok := p.peeker.Read()
337 val, err := strconv.Atoi(tok.Content)
338 if err != nil {
339 return nil, TokenErrorf(tok, "invalid integer: %s", err)
340 }
341 return &ast.LiteralNode{
342 Value: val,
343 Typex: ast.TypeInt,
344 Posx: tok.Pos,
345 }, nil
346
347 case scanner.FLOAT:
348 tok := p.peeker.Read()
349 val, err := strconv.ParseFloat(tok.Content, 64)
350 if err != nil {
351 return nil, TokenErrorf(tok, "invalid float: %s", err)
352 }
353 return &ast.LiteralNode{
354 Value: val,
355 Typex: ast.TypeFloat,
356 Posx: tok.Pos,
357 }, nil
358
359 case scanner.BOOL:
360 tok := p.peeker.Read()
361 // the scanner guarantees that tok.Content is either "true" or "false"
362		val := tok.Content[0] == 't'
368 return &ast.LiteralNode{
369 Value: val,
370 Typex: ast.TypeBool,
371 Posx: tok.Pos,
372 }, nil
373
374 case scanner.MINUS:
375 opTok := p.peeker.Read()
376 // important to use ParseExpressionTerm rather than ParseExpression
377 // here, otherwise we can capture a following binary expression into
378 // our negation.
379 // e.g. -46+5 should parse as (0-46)+5, not 0-(46+5)
380 operand, err := p.ParseExpressionTerm()
381 if err != nil {
382 return nil, err
383 }
384 // The AST currently represents negative numbers as
385 // a binary subtraction of the number from zero.
386 return &ast.Arithmetic{
387 Op: ast.ArithmeticOpSub,
388 Exprs: []ast.Node{
389 &ast.LiteralNode{
390 Value: 0,
391 Typex: ast.TypeInt,
392 Posx: opTok.Pos,
393 },
394 operand,
395 },
396 Posx: opTok.Pos,
397 }, nil
398
399 case scanner.BANG:
400 opTok := p.peeker.Read()
401 // important to use ParseExpressionTerm rather than ParseExpression
402 // here, otherwise we can capture a following binary expression into
403 // our negation.
404 operand, err := p.ParseExpressionTerm()
405 if err != nil {
406 return nil, err
407 }
408 // The AST currently represents binary negation as an equality
409 // test with "false".
410 return &ast.Arithmetic{
411 Op: ast.ArithmeticOpEqual,
412 Exprs: []ast.Node{
413 &ast.LiteralNode{
414 Value: false,
415 Typex: ast.TypeBool,
416 Posx: opTok.Pos,
417 },
418 operand,
419 },
420 Posx: opTok.Pos,
421 }, nil
422
423 case scanner.IDENTIFIER:
424 return p.ParseScopeInteraction()
425
426 default:
427 return nil, ExpectationError("expression", next)
428 }
429}
430
431// ParseScopeInteraction parses the expression types that interact
432// with the evaluation scope: variable access, function calls, and
433// indexing.
434//
435// Indexing should actually be a distinct operator in its own right,
436// so that e.g. it can be applied to the result of a function call,
437// but for now we're preserving the behavior of the older yacc-based
438// parser.
439func (p *parser) ParseScopeInteraction() (ast.Node, error) {
440 first := p.peeker.Read()
441 startPos := first.Pos
442 if first.Type != scanner.IDENTIFIER {
443 return nil, ExpectationError("identifier", first)
444 }
445
446 next := p.peeker.Peek()
447 if next.Type == scanner.OPAREN {
448 // function call
449 funcName := first.Content
450 p.peeker.Read() // eat paren
451 var args []ast.Node
452
453 for {
454 if p.peeker.Peek().Type == scanner.CPAREN {
455 break
456 }
457
458 arg, err := p.ParseExpression()
459 if err != nil {
460 return nil, err
461 }
462
463 args = append(args, arg)
464
465 if p.peeker.Peek().Type == scanner.COMMA {
466 p.peeker.Read() // eat comma
467 continue
468 } else {
469 break
470 }
471 }
472
473 err := p.requireTokenType(scanner.CPAREN, `")"`)
474 if err != nil {
475 return nil, err
476 }
477
478 return &ast.Call{
479 Func: funcName,
480 Args: args,
481 Posx: startPos,
482 }, nil
483 }
484
485 varNode := &ast.VariableAccess{
486 Name: first.Content,
487 Posx: startPos,
488 }
489
490 if p.peeker.Peek().Type == scanner.OBRACKET {
491 // index operator
492 startPos := p.peeker.Read().Pos // eat bracket
493 indexExpr, err := p.ParseExpression()
494 if err != nil {
495 return nil, err
496 }
497 err = p.requireTokenType(scanner.CBRACKET, `"]"`)
498 if err != nil {
499 return nil, err
500 }
501 return &ast.Index{
502 Target: varNode,
503 Key: indexExpr,
504 Posx: startPos,
505 }, nil
506 }
507
508 return varNode, nil
509}
510
511// requireTokenType consumes the next token and returns an error if its
512// type does not match the given type. nil is returned if the type matches.
513//
514// This is a helper around peeker.Read() for situations where the parser just
515// wants to assert that a particular token type must be present.
516func (p *parser) requireTokenType(wantType scanner.TokenType, wantName string) error {
517 token := p.peeker.Read()
518 if token.Type != wantType {
519 return ExpectationError(wantName, token)
520 }
521 return nil
522}
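// Illustrative result shapes for the hoisting behavior described in
// parseInterpolationSeq above (a sketch, not exhaustive):
//
//	hil.Parse("hello")    // *ast.LiteralNode{Value: "hello", Typex: ast.TypeString}
//	hil.Parse("")         // *ast.LiteralNode{Value: "", Typex: ast.TypeString}
//	hil.Parse("a ${b} c") // *ast.Output{Exprs: [literal "a ", variable b, literal " c"]}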
diff --git a/vendor/github.com/hashicorp/hil/scanner/peeker.go b/vendor/github.com/hashicorp/hil/scanner/peeker.go
new file mode 100644
index 0000000..4de3728
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/scanner/peeker.go
@@ -0,0 +1,55 @@
1package scanner
2
3// Peeker is a utility that wraps a token channel returned by Scan and
4// provides an interface that allows a caller (e.g. the parser) to
5// work with the token stream in a mode that allows one token of lookahead,
6// and provides utilities for more convenient processing of the stream.
7type Peeker struct {
8 ch <-chan *Token
9 peeked *Token
10}
11
12func NewPeeker(ch <-chan *Token) *Peeker {
13 return &Peeker{
14 ch: ch,
15 }
16}
17
18// Peek returns the next token in the stream without consuming it. A
19// subsequent call to Read will return the same token.
20func (p *Peeker) Peek() *Token {
21 if p.peeked == nil {
22 p.peeked = <-p.ch
23 }
24 return p.peeked
25}
26
27// Read consumes the next token in the stream and returns it.
28func (p *Peeker) Read() *Token {
29 token := p.Peek()
30
31 // As a special case, we will produce the EOF token forever once
32 // it is reached.
33 if token.Type != EOF {
34 p.peeked = nil
35 }
36
37 return token
38}
39
40// Close ensures that the token stream has been exhausted, to prevent
41// the goroutine in the underlying scanner from leaking.
42//
43// It's not necessary to call this if the caller reads the token stream
44// to EOF, since that implicitly closes the scanner.
45func (p *Peeker) Close() {
46	for range p.ch {
47 // discard
48 }
49 // Install a synthetic EOF token in 'peeked' in case someone
50 // erroneously calls Peek() or Read() after we've closed.
51 p.peeked = &Token{
52 Type: EOF,
53 Content: "",
54 }
55}
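// A minimal usage sketch for Peeker, composed with scanner.Scan from
// scanner.go below; src is a hypothetical input string, and this mirrors
// how parser.Parse drives the stream:
//
//	peeker := scanner.NewPeeker(scanner.Scan(src, ast.InitPos))
//	for peeker.Peek().Type != scanner.EOF {
//		tok := peeker.Read()
//		// ... handle tok ...
//	}
//	peeker.Close() // drain the channel so the scan goroutine exits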
diff --git a/vendor/github.com/hashicorp/hil/scanner/scanner.go b/vendor/github.com/hashicorp/hil/scanner/scanner.go
new file mode 100644
index 0000000..bab86c6
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/scanner/scanner.go
@@ -0,0 +1,550 @@
1package scanner
2
3import (
4 "unicode"
5 "unicode/utf8"
6
7 "github.com/hashicorp/hil/ast"
8)
9
10// Scan returns a channel that receives Tokens from the given input string.
11//
12// The scanner's job is just to partition the string into meaningful parts.
13// It doesn't do any transformation of the raw input string, so the caller
14// must deal with any further interpretation required, such as parsing INTEGER
15// tokens into real ints, or dealing with escape sequences in LITERAL or
16// STRING tokens.
17//
18// Strings in the returned tokens are slices from the original string.
19//
20// startPos should be set to ast.InitPos unless the caller knows that
21// this interpolation string is part of a larger file and knows the position
22// of the first character in that larger file.
23func Scan(s string, startPos ast.Pos) <-chan *Token {
24 ch := make(chan *Token)
25 go scan(s, ch, startPos)
26 return ch
27}
28
29func scan(s string, ch chan<- *Token, pos ast.Pos) {
30 // 'remain' starts off as the whole string but we gradually
31	// slice off the front of it as we work our way through.
32 remain := s
33
34 // nesting keeps track of how many ${ .. } sequences we are
35 // inside, so we can recognize the minor differences in syntax
36 // between outer string literals (LITERAL tokens) and quoted
37 // string literals (STRING tokens).
38 nesting := 0
39
40 // We're going to flip back and forth between parsing literals/strings
41 // and parsing interpolation sequences ${ .. } until we reach EOF or
42 // some INVALID token.
43All:
44 for {
45 startPos := pos
46 // Literal string processing first, since the beginning of
47 // a string is always outside of an interpolation sequence.
48 literalVal, terminator := scanLiteral(remain, pos, nesting > 0)
49
50 if len(literalVal) > 0 {
51 litType := LITERAL
52 if nesting > 0 {
53 litType = STRING
54 }
55 ch <- &Token{
56 Type: litType,
57 Content: literalVal,
58 Pos: startPos,
59 }
60 remain = remain[len(literalVal):]
61 }
62
63 ch <- terminator
64 remain = remain[len(terminator.Content):]
65 pos = terminator.Pos
66 // Safe to use len() here because none of the terminator tokens
67		// can contain multi-byte UTF-8 sequences.
68 pos.Column = pos.Column + len(terminator.Content)
69
70 switch terminator.Type {
71 case INVALID:
72 // Synthetic EOF after invalid token, since further scanning
73 // is likely to just produce more garbage.
74 ch <- &Token{
75 Type: EOF,
76 Content: "",
77 Pos: pos,
78 }
79 break All
80 case EOF:
81 // All done!
82 break All
83 case BEGIN:
84 nesting++
85 case CQUOTE:
86 // nothing special to do
87 default:
88 // Should never happen
89 panic("invalid string/literal terminator")
90 }
91
92 // Now we do the processing of the insides of ${ .. } sequences.
93 // This loop terminates when we encounter either a closing } or
94 // an opening ", which will cause us to return to literal processing.
95 Interpolation:
96 for {
97
98 token, size, newPos := scanInterpolationToken(remain, pos)
99 ch <- token
100 remain = remain[size:]
101 pos = newPos
102
103 switch token.Type {
104 case INVALID:
105 // Synthetic EOF after invalid token, since further scanning
106 // is likely to just produce more garbage.
107 ch <- &Token{
108 Type: EOF,
109 Content: "",
110 Pos: pos,
111 }
112 break All
113 case EOF:
114 // All done
115 // (though a syntax error that we'll catch in the parser)
116 break All
117 case END:
118 nesting--
119 if nesting < 0 {
120 // Can happen if there are unbalanced ${ and } sequences
121 // in the input, which we'll catch in the parser.
122 nesting = 0
123 }
124 break Interpolation
125 case OQUOTE:
126 // Beginning of nested quoted string
127 break Interpolation
128 }
129 }
130 }
131
132 close(ch)
133}
134
135// Returns the token found at the start of the given string, followed by
136// the number of bytes that were consumed from the string and the adjusted
137// source position.
138//
139// Note that the number of bytes consumed can be more than the length of
140// the returned token contents if the string begins with whitespace, since
141// it will be silently consumed before reading the token.
142func scanInterpolationToken(s string, startPos ast.Pos) (*Token, int, ast.Pos) {
143 pos := startPos
144 size := 0
145
146 // Consume whitespace, if any
147 for len(s) > 0 && byteIsSpace(s[0]) {
148 if s[0] == '\n' {
149 pos.Column = 1
150 pos.Line++
151 } else {
152 pos.Column++
153 }
154 size++
155 s = s[1:]
156 }
157
158 // Unexpected EOF during sequence
159 if len(s) == 0 {
160 return &Token{
161 Type: EOF,
162 Content: "",
163 Pos: pos,
164 }, size, pos
165 }
166
167 next := s[0]
168 var token *Token
169
170 switch next {
171 case '(', ')', '[', ']', ',', '.', '+', '-', '*', '/', '%', '?', ':':
172 // Easy punctuation symbols that don't have any special meaning
173 // during scanning, and that stand for themselves in the
174 // TokenType enumeration.
175 token = &Token{
176 Type: TokenType(next),
177 Content: s[:1],
178 Pos: pos,
179 }
180 case '}':
181 token = &Token{
182 Type: END,
183 Content: s[:1],
184 Pos: pos,
185 }
186 case '"':
187 token = &Token{
188 Type: OQUOTE,
189 Content: s[:1],
190 Pos: pos,
191 }
192 case '!':
193 if len(s) >= 2 && s[:2] == "!=" {
194 token = &Token{
195 Type: NOTEQUAL,
196 Content: s[:2],
197 Pos: pos,
198 }
199 } else {
200 token = &Token{
201 Type: BANG,
202 Content: s[:1],
203 Pos: pos,
204 }
205 }
206 case '<':
207 if len(s) >= 2 && s[:2] == "<=" {
208 token = &Token{
209 Type: LTE,
210 Content: s[:2],
211 Pos: pos,
212 }
213 } else {
214 token = &Token{
215 Type: LT,
216 Content: s[:1],
217 Pos: pos,
218 }
219 }
220 case '>':
221 if len(s) >= 2 && s[:2] == ">=" {
222 token = &Token{
223 Type: GTE,
224 Content: s[:2],
225 Pos: pos,
226 }
227 } else {
228 token = &Token{
229 Type: GT,
230 Content: s[:1],
231 Pos: pos,
232 }
233 }
234 case '=':
235 if len(s) >= 2 && s[:2] == "==" {
236 token = &Token{
237 Type: EQUAL,
238 Content: s[:2],
239 Pos: pos,
240 }
241 } else {
242 // A single equals is not a valid operator
243 token = &Token{
244 Type: INVALID,
245 Content: s[:1],
246 Pos: pos,
247 }
248 }
249 case '&':
250 if len(s) >= 2 && s[:2] == "&&" {
251 token = &Token{
252 Type: AND,
253 Content: s[:2],
254 Pos: pos,
255 }
256 } else {
257 token = &Token{
258 Type: INVALID,
259 Content: s[:1],
260 Pos: pos,
261 }
262 }
263 case '|':
264 if len(s) >= 2 && s[:2] == "||" {
265 token = &Token{
266 Type: OR,
267 Content: s[:2],
268 Pos: pos,
269 }
270 } else {
271 token = &Token{
272 Type: INVALID,
273 Content: s[:1],
274 Pos: pos,
275 }
276 }
277 default:
278 if next >= '0' && next <= '9' {
279 num, numType := scanNumber(s)
280 token = &Token{
281 Type: numType,
282 Content: num,
283 Pos: pos,
284 }
285 } else if stringStartsWithIdentifier(s) {
286 ident, runeLen := scanIdentifier(s)
287 tokenType := IDENTIFIER
288 if ident == "true" || ident == "false" {
289 tokenType = BOOL
290 }
291 token = &Token{
292 Type: tokenType,
293 Content: ident,
294 Pos: pos,
295 }
296 // Skip usual token handling because it doesn't
297 // know how to deal with UTF-8 sequences.
298 pos.Column = pos.Column + runeLen
299 return token, size + len(ident), pos
300 } else {
301 _, byteLen := utf8.DecodeRuneInString(s)
302 token = &Token{
303 Type: INVALID,
304 Content: s[:byteLen],
305 Pos: pos,
306 }
307 // Skip usual token handling because it doesn't
308 // know how to deal with UTF-8 sequences.
309 pos.Column = pos.Column + 1
310 return token, size + byteLen, pos
311 }
312 }
313
314 // Here we assume that the token content contains no UTF-8 sequences,
315 // because we dealt with UTF-8 characters as a special case where
316 // necessary above.
317 size = size + len(token.Content)
318 pos.Column = pos.Column + len(token.Content)
319
320 return token, size, pos
321}
322
323// Returns the (possibly-empty) prefix of the given string that represents
324// a literal, followed by the token that marks the end of the literal.
325func scanLiteral(s string, startPos ast.Pos, nested bool) (string, *Token) {
326 litLen := 0
327 pos := startPos
328 var terminator *Token
329 for {
330
331 if litLen >= len(s) {
332 if nested {
333 // We've ended in the middle of a quoted string,
334 // which means this token is actually invalid.
335 return "", &Token{
336 Type: INVALID,
337 Content: s,
338 Pos: startPos,
339 }
340 }
341 terminator = &Token{
342 Type: EOF,
343 Content: "",
344 Pos: pos,
345 }
346 break
347 }
348
349 next := s[litLen]
350
351 if next == '$' && len(s) > litLen+1 {
352 follow := s[litLen+1]
353
354 if follow == '{' {
355 terminator = &Token{
356 Type: BEGIN,
357 Content: s[litLen : litLen+2],
358 Pos: pos,
359 }
360 pos.Column = pos.Column + 2
361 break
362 } else if follow == '$' {
363 // Double-$ escapes the special processing of $,
364 // so we will consume both characters here.
365 pos.Column = pos.Column + 2
366 litLen = litLen + 2
367 continue
368 }
369 }
370
371 // special handling that applies only to quoted strings
372 if nested {
373 if next == '"' {
374 terminator = &Token{
375 Type: CQUOTE,
376 Content: s[litLen : litLen+1],
377 Pos: pos,
378 }
379 pos.Column = pos.Column + 1
380 break
381 }
382
383 // Escaped quote marks do not terminate the string.
384 //
385 // All we do here in the scanner is avoid terminating a string
386 // due to an escaped quote. The parser is responsible for the
387 // full handling of escape sequences, since it's able to produce
388 // better error messages than we can produce in here.
389 if next == '\\' && len(s) > litLen+1 {
390 follow := s[litLen+1]
391
392 if follow == '"' {
393 // \" escapes the special processing of ",
394 // so we will consume both characters here.
395 pos.Column = pos.Column + 2
396 litLen = litLen + 2
397 continue
398 }
399 }
400 }
401
402 if next == '\n' {
403 pos.Column = 1
404 pos.Line++
405 litLen++
406 } else {
407 pos.Column++
408
409 // "Column" measures runes, so we need to actually consume
410 // a valid UTF-8 character here.
411 _, size := utf8.DecodeRuneInString(s[litLen:])
412 litLen = litLen + size
413 }
414
415 }
416
417 return s[:litLen], terminator
418}
419
420// scanNumber returns the extent of the prefix of the string that represents
421// a valid number, along with what type of number it represents: INT or FLOAT.
422//
423// scanNumber does only basic character analysis: numbers consist of digits
424// and periods, with at least one period signalling a FLOAT. It's the parser's
425// responsibility to validate the form and range of the number, such as ensuring
426// that a FLOAT actually contains only one period, etc.
427func scanNumber(s string) (string, TokenType) {
428 period := -1
429 byteLen := 0
430 numType := INTEGER
431 for {
432 if byteLen >= len(s) {
433 break
434 }
435
436 next := s[byteLen]
437 if next != '.' && (next < '0' || next > '9') {
438 // If our last value was a period, then we're not a float,
439 // we're just an integer that ends in a period.
440 if period == byteLen-1 {
441 byteLen--
442 numType = INTEGER
443 }
444
445 break
446 }
447
448 if next == '.' {
449 // If we've already seen a period, break out
450 if period >= 0 {
451 break
452 }
453
454 period = byteLen
455 numType = FLOAT
456 }
457
458 byteLen++
459 }
460
461 return s[:byteLen], numType
462}
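
A minimal in-package sketch (hypothetical test code) of the rules above. Note that the trailing period is given back only when the scanner can see the non-numeric character that follows it, as with the `}` in the third case.

```go
package scanner

import "testing"

// TestScanNumberSketch is illustrative only (hypothetical name).
func TestScanNumberSketch(t *testing.T) {
	cases := []struct {
		in, out string
		typ     TokenType
	}{
		{"42", "42", INTEGER},
		{"1.5", "1.5", FLOAT},
		{"123.}", "123", INTEGER}, // trailing period is not consumed
		{"1.2.3", "1.2", FLOAT},   // scanning stops at the second period
	}
	for _, c := range cases {
		got, typ := scanNumber(c.in)
		if got != c.out || typ != c.typ {
			t.Errorf("scanNumber(%q) = %q, %s; want %q, %s",
				c.in, got, typ, c.out, c.typ)
		}
	}
}
```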
463
464// scanIdentifier returns the extent of the prefix of the string that
465// represents a valid identifier, along with the length of that prefix
466// in runes.
467//
468// Identifiers may contain utf8-encoded non-Latin letters, which will
469// cause the returned "rune length" to be shorter than the byte length
470// of the returned string.
471func scanIdentifier(s string) (string, int) {
472 byteLen := 0
473 runeLen := 0
474 for {
475 if byteLen >= len(s) {
476 break
477 }
478
479 nextRune, size := utf8.DecodeRuneInString(s[byteLen:])
480 if !(nextRune == '_' ||
481 nextRune == '-' ||
482 nextRune == '.' ||
483 nextRune == '*' ||
484 unicode.IsNumber(nextRune) ||
485 unicode.IsLetter(nextRune) ||
486 unicode.IsMark(nextRune)) {
487 break
488 }
489
490 // If we reach a star, it must be between periods to be part
491 // of the same identifier.
492 if nextRune == '*' && s[byteLen-1] != '.' {
493 break
494 }
495
496 // If our previous character was a star, then the current must
497		// be a period. Otherwise, undo that and exit.
498 if byteLen > 0 && s[byteLen-1] == '*' && nextRune != '.' {
499 byteLen--
500 if s[byteLen-1] == '.' {
501 byteLen--
502 }
503
504 break
505 }
506
507 byteLen = byteLen + size
508 runeLen = runeLen + 1
509 }
510
511 return s[:byteLen], runeLen
512}
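
A minimal in-package sketch (hypothetical test code) of the splat rule above: `*` is accepted only between periods, and scanning stops at the first rune that cannot be part of an identifier.

```go
package scanner

import "testing"

// TestScanIdentifierSketch is illustrative only (hypothetical name).
func TestScanIdentifierSketch(t *testing.T) {
	// A "splat" star between periods stays part of the identifier.
	if id, _ := scanIdentifier("aws_instance.web.*.id}"); id != "aws_instance.web.*.id" {
		t.Errorf("got %q", id)
	}

	// Scanning stops at the first disallowed rune.
	if id, n := scanIdentifier("foo+1"); id != "foo" || n != 3 {
		t.Errorf("got %q (%d runes)", id, n)
	}
}
```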
513
514// byteIsSpace implements a restrictive interpretation of spaces that includes
515// only what's valid inside interpolation sequences: spaces, tabs, carriage returns, and newlines.
516func byteIsSpace(b byte) bool {
517 switch b {
518 case ' ', '\t', '\r', '\n':
519 return true
520 default:
521 return false
522 }
523}
524
525// stringStartsWithIdentifier returns true if the given string begins with
526// a character that is a legal start of an identifier: an underscore or
527// any character that Unicode considers to be a letter.
528func stringStartsWithIdentifier(s string) bool {
529 if len(s) == 0 {
530 return false
531 }
532
533 first := s[0]
534
535 // Easy ASCII cases first
536 if (first >= 'a' && first <= 'z') || (first >= 'A' && first <= 'Z') || first == '_' {
537 return true
538 }
539
540 // If our first byte begins a UTF-8 sequence then the sequence might
541 // be a unicode letter.
542 if utf8.RuneStart(first) {
543 firstRune, _ := utf8.DecodeRuneInString(s)
544 if unicode.IsLetter(firstRune) {
545 return true
546 }
547 }
548
549 return false
550}
diff --git a/vendor/github.com/hashicorp/hil/scanner/token.go b/vendor/github.com/hashicorp/hil/scanner/token.go
new file mode 100644
index 0000000..b6c82ae
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/scanner/token.go
@@ -0,0 +1,105 @@
1package scanner
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/hil/ast"
7)
8
9type Token struct {
10 Type TokenType
11 Content string
12 Pos ast.Pos
13}
14
15//go:generate stringer -type=TokenType
16type TokenType rune
17
18const (
19 // Raw string data outside of ${ .. } sequences
20 LITERAL TokenType = 'o'
21
22 // STRING is like a LITERAL but it's inside a quoted string
23 // within a ${ ... } sequence, and so it can contain backslash
24 // escaping.
25 STRING TokenType = 'S'
26
27 // Other Literals
28 INTEGER TokenType = 'I'
29 FLOAT TokenType = 'F'
30 BOOL TokenType = 'B'
31
32 BEGIN TokenType = '$' // actually "${"
33 END TokenType = '}'
34 OQUOTE TokenType = '“' // Opening quote of a nested quoted sequence
35 CQUOTE TokenType = '”' // Closing quote of a nested quoted sequence
36 OPAREN TokenType = '('
37 CPAREN TokenType = ')'
38 OBRACKET TokenType = '['
39 CBRACKET TokenType = ']'
40 COMMA TokenType = ','
41
42 IDENTIFIER TokenType = 'i'
43
44 PERIOD TokenType = '.'
45 PLUS TokenType = '+'
46 MINUS TokenType = '-'
47 STAR TokenType = '*'
48 SLASH TokenType = '/'
49 PERCENT TokenType = '%'
50
51 AND TokenType = '∧'
52 OR TokenType = '∨'
53 BANG TokenType = '!'
54
55 EQUAL TokenType = '='
56 NOTEQUAL TokenType = '≠'
57 GT TokenType = '>'
58 LT TokenType = '<'
59 GTE TokenType = '≥'
60 LTE TokenType = '≤'
61
62 QUESTION TokenType = '?'
63 COLON TokenType = ':'
64
65 EOF TokenType = '␄'
66
67 // Produced for sequences that cannot be understood as valid tokens
68 // e.g. due to use of unrecognized punctuation.
69 INVALID TokenType = '�'
70)
71
72func (t *Token) String() string {
73 switch t.Type {
74 case EOF:
75 return "end of string"
76 case INVALID:
77 return fmt.Sprintf("invalid sequence %q", t.Content)
78 case INTEGER:
79 return fmt.Sprintf("integer %s", t.Content)
80 case FLOAT:
81 return fmt.Sprintf("float %s", t.Content)
82 case STRING:
83 return fmt.Sprintf("string %q", t.Content)
84 case LITERAL:
85 return fmt.Sprintf("literal %q", t.Content)
86 case OQUOTE:
87		return "opening quote"
88	case CQUOTE:
89		return "closing quote"
90 case AND:
91 return "&&"
92 case OR:
93 return "||"
94 case NOTEQUAL:
95 return "!="
96 case GTE:
97 return ">="
98 case LTE:
99 return "<="
100 default:
101 // The remaining token types have content that
102 // speaks for itself.
103 return fmt.Sprintf("%q", t.Content)
104 }
105}
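
A short, hypothetical in-package sketch of how the descriptions above read when tokens are formatted into error messages:

```go
package scanner

import (
	"fmt"

	"github.com/hashicorp/hil/ast"
)

// tokenStringExample is a hypothetical helper, not part of the vendored file.
func tokenStringExample() {
	pos := ast.Pos{Line: 1, Column: 1}
	for _, t := range []*Token{
		{Type: INTEGER, Content: "42", Pos: pos},
		{Type: AND, Content: "&&", Pos: pos},
		{Type: EOF, Pos: pos},
	} {
		fmt.Println(t.String()) // "integer 42", then "&&", then "end of string"
	}
}
```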
diff --git a/vendor/github.com/hashicorp/hil/scanner/tokentype_string.go b/vendor/github.com/hashicorp/hil/scanner/tokentype_string.go
new file mode 100644
index 0000000..a602f5f
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/scanner/tokentype_string.go
@@ -0,0 +1,51 @@
1// Code generated by "stringer -type=TokenType"; DO NOT EDIT
2
3package scanner
4
5import "fmt"
6
7const _TokenType_name = "BANGBEGINPERCENTOPARENCPARENSTARPLUSCOMMAMINUSPERIODSLASHCOLONLTEQUALGTQUESTIONBOOLFLOATINTEGERSTRINGOBRACKETCBRACKETIDENTIFIERLITERALENDOQUOTECQUOTEANDORNOTEQUALLTEGTEEOFINVALID"
8
9var _TokenType_map = map[TokenType]string{
10 33: _TokenType_name[0:4],
11 36: _TokenType_name[4:9],
12 37: _TokenType_name[9:16],
13 40: _TokenType_name[16:22],
14 41: _TokenType_name[22:28],
15 42: _TokenType_name[28:32],
16 43: _TokenType_name[32:36],
17 44: _TokenType_name[36:41],
18 45: _TokenType_name[41:46],
19 46: _TokenType_name[46:52],
20 47: _TokenType_name[52:57],
21 58: _TokenType_name[57:62],
22 60: _TokenType_name[62:64],
23 61: _TokenType_name[64:69],
24 62: _TokenType_name[69:71],
25 63: _TokenType_name[71:79],
26 66: _TokenType_name[79:83],
27 70: _TokenType_name[83:88],
28 73: _TokenType_name[88:95],
29 83: _TokenType_name[95:101],
30 91: _TokenType_name[101:109],
31 93: _TokenType_name[109:117],
32 105: _TokenType_name[117:127],
33 111: _TokenType_name[127:134],
34 125: _TokenType_name[134:137],
35 8220: _TokenType_name[137:143],
36 8221: _TokenType_name[143:149],
37 8743: _TokenType_name[149:152],
38 8744: _TokenType_name[152:154],
39 8800: _TokenType_name[154:162],
40 8804: _TokenType_name[162:165],
41 8805: _TokenType_name[165:168],
42 9220: _TokenType_name[168:171],
43 65533: _TokenType_name[171:178],
44}
45
46func (i TokenType) String() string {
47 if str, ok := _TokenType_map[i]; ok {
48 return str
49 }
50 return fmt.Sprintf("TokenType(%d)", i)
51}
diff --git a/vendor/github.com/hashicorp/hil/transform_fixed.go b/vendor/github.com/hashicorp/hil/transform_fixed.go
new file mode 100644
index 0000000..e69df29
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/transform_fixed.go
@@ -0,0 +1,29 @@
1package hil
2
3import (
4 "github.com/hashicorp/hil/ast"
5)
6
7// FixedValueTransform transforms an AST to return a fixed value for
8// all interpolations. i.e. you can make "hi ${anything}" always
9// turn into "hi foo".
10//
11// The primary use case for this is for config validations where you can
12// verify that interpolations result in a certain type of string.
13func FixedValueTransform(root ast.Node, Value *ast.LiteralNode) ast.Node {
14 // We visit the nodes in top-down order
15 result := root
16 switch n := result.(type) {
17 case *ast.Output:
18 for i, v := range n.Exprs {
19 n.Exprs[i] = FixedValueTransform(v, Value)
20 }
21 case *ast.LiteralNode:
22 // We keep it as-is
23 default:
24 // Anything else we replace
25 result = Value
26 }
27
28 return result
29}
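
A minimal sketch of FixedValueTransform in use (the input program and the node inspection are illustrative, not from the vendored code): after the transform, every expression under the Output node is a literal.

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hil"
	"github.com/hashicorp/hil/ast"
)

func main() {
	root, err := hil.Parse("hi ${anything}")
	if err != nil {
		panic(err)
	}

	// Force every interpolation to the fixed literal "foo".
	fixed := &ast.LiteralNode{Value: "foo", Typex: ast.TypeString}
	root = hil.FixedValueTransform(root, fixed)

	// Every expression under the output node is now a literal.
	if out, ok := root.(*ast.Output); ok {
		for _, e := range out.Exprs {
			if lit, ok := e.(*ast.LiteralNode); ok {
				fmt.Printf("%q\n", lit.Value) // "hi " then "foo"
			}
		}
	}
}
```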
diff --git a/vendor/github.com/hashicorp/hil/walk.go b/vendor/github.com/hashicorp/hil/walk.go
new file mode 100644
index 0000000..0ace830
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/walk.go
@@ -0,0 +1,266 @@
1package hil
2
3import (
4 "fmt"
5 "reflect"
6 "strings"
7
8 "github.com/hashicorp/hil/ast"
9 "github.com/mitchellh/reflectwalk"
10)
11
12// WalkFn is the type of function to pass to Walk. Modify fields within
13// WalkData to control whether replacement happens.
14type WalkFn func(*WalkData) error
15
16// WalkData is the structure passed to the callback of the Walk function.
17//
18// This structure contains data passed in as well as fields that are expected
19// to be written by the caller as a result. Please see the documentation for
20// each field for more information.
21type WalkData struct {
22 // Root is the parsed root of this HIL program
23 Root ast.Node
24
25 // Location is the location within the structure where this
26 // value was found. This can be used to modify behavior within
27 // slices and so on.
28 Location reflectwalk.Location
29
30 // The below two values must be set by the callback to have any effect.
31 //
32 // Replace, if true, will replace the value in the structure with
33 // ReplaceValue. It is up to the caller to make sure this is a string.
34 Replace bool
35 ReplaceValue string
36}
37
38// Walk will walk an arbitrary Go structure and parse any string as an
39// HIL program and call the callback cb to determine what to replace it
40// with.
41//
42// This function is very useful for arbitrary HIL program interpolation
43// across a complex configuration structure. Due to the heavy use of
44// reflection in this function, it is recommended to write many unit tests
45// with your typical configuration structures to help mitigate the risk
46// of panics.
47func Walk(v interface{}, cb WalkFn) error {
48 walker := &interpolationWalker{F: cb}
49 return reflectwalk.Walk(v, walker)
50}
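
A minimal sketch of Walk in use (the callback and sample structure are illustrative): strings that parse to a plain literal are skipped, and setting Replace swaps the interpolated string for ReplaceValue.

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hil"
)

func main() {
	cfg := map[string]interface{}{
		"name":  "web",               // plain literal: callback not invoked
		"image": "ami-${var.region}", // contains an interpolation
	}

	err := hil.Walk(cfg, func(d *hil.WalkData) error {
		// d.Root is the parsed HIL program for this string.
		d.Replace = true
		d.ReplaceValue = "<computed>"
		return nil
	})
	if err != nil {
		panic(err)
	}

	fmt.Println(cfg["image"]) // <computed>
	fmt.Println(cfg["name"])  // web (unchanged)
}
```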
51
52// interpolationWalker implements interfaces for the reflectwalk package
53// (github.com/mitchellh/reflectwalk) that can be used to automatically
54// execute a callback for an interpolation.
55type interpolationWalker struct {
56 F WalkFn
57
58 key []string
59 lastValue reflect.Value
60 loc reflectwalk.Location
61 cs []reflect.Value
62 csKey []reflect.Value
63 csData interface{}
64 sliceIndex int
65 unknownKeys []string
66}
67
68func (w *interpolationWalker) Enter(loc reflectwalk.Location) error {
69 w.loc = loc
70 return nil
71}
72
73func (w *interpolationWalker) Exit(loc reflectwalk.Location) error {
74 w.loc = reflectwalk.None
75
76 switch loc {
77 case reflectwalk.Map:
78 w.cs = w.cs[:len(w.cs)-1]
79 case reflectwalk.MapValue:
80 w.key = w.key[:len(w.key)-1]
81 w.csKey = w.csKey[:len(w.csKey)-1]
82 case reflectwalk.Slice:
83 // Split any values that need to be split
84 w.splitSlice()
85 w.cs = w.cs[:len(w.cs)-1]
86 case reflectwalk.SliceElem:
87 w.csKey = w.csKey[:len(w.csKey)-1]
88 }
89
90 return nil
91}
92
93func (w *interpolationWalker) Map(m reflect.Value) error {
94 w.cs = append(w.cs, m)
95 return nil
96}
97
98func (w *interpolationWalker) MapElem(m, k, v reflect.Value) error {
99 w.csData = k
100 w.csKey = append(w.csKey, k)
101 w.key = append(w.key, k.String())
102 w.lastValue = v
103 return nil
104}
105
106func (w *interpolationWalker) Slice(s reflect.Value) error {
107 w.cs = append(w.cs, s)
108 return nil
109}
110
111func (w *interpolationWalker) SliceElem(i int, elem reflect.Value) error {
112 w.csKey = append(w.csKey, reflect.ValueOf(i))
113 w.sliceIndex = i
114 return nil
115}
116
117func (w *interpolationWalker) Primitive(v reflect.Value) error {
118 setV := v
119
120 // We only care about strings
121 if v.Kind() == reflect.Interface {
122 setV = v
123 v = v.Elem()
124 }
125 if v.Kind() != reflect.String {
126 return nil
127 }
128
129 astRoot, err := Parse(v.String())
130 if err != nil {
131 return err
132 }
133
134 // If the AST we got is just a literal string value with the same
135	// value then we ignore it. We have to check if it's the same value
136 // because it is possible to input a string, get out a string, and
137 // have it be different. For example: "foo-$${bar}" turns into
138 // "foo-${bar}"
139 if n, ok := astRoot.(*ast.LiteralNode); ok {
140 if s, ok := n.Value.(string); ok && s == v.String() {
141 return nil
142 }
143 }
144
145 if w.F == nil {
146 return nil
147 }
148
149 data := WalkData{Root: astRoot, Location: w.loc}
150 if err := w.F(&data); err != nil {
151 return fmt.Errorf(
152 "%s in:\n\n%s",
153 err, v.String())
154 }
155
156 if data.Replace {
157 /*
158 if remove {
159 w.removeCurrent()
160 return nil
161 }
162 */
163
164 resultVal := reflect.ValueOf(data.ReplaceValue)
165 switch w.loc {
166 case reflectwalk.MapKey:
167 m := w.cs[len(w.cs)-1]
168
169 // Delete the old value
170 var zero reflect.Value
171 m.SetMapIndex(w.csData.(reflect.Value), zero)
172
173 // Set the new key with the existing value
174 m.SetMapIndex(resultVal, w.lastValue)
175
176 // Set the key to be the new key
177 w.csData = resultVal
178 case reflectwalk.MapValue:
179 // If we're in a map, then the only way to set a map value is
180 // to set it directly.
181 m := w.cs[len(w.cs)-1]
182 mk := w.csData.(reflect.Value)
183 m.SetMapIndex(mk, resultVal)
184 default:
185 // Otherwise, we should be addressable
186 setV.Set(resultVal)
187 }
188 }
189
190 return nil
191}
192
193func (w *interpolationWalker) removeCurrent() {
194 // Append the key to the unknown keys
195 w.unknownKeys = append(w.unknownKeys, strings.Join(w.key, "."))
196
197 for i := 1; i <= len(w.cs); i++ {
198 c := w.cs[len(w.cs)-i]
199 switch c.Kind() {
200 case reflect.Map:
201 // Zero value so that we delete the map key
202 var val reflect.Value
203
204 // Get the key and delete it
205 k := w.csData.(reflect.Value)
206 c.SetMapIndex(k, val)
207 return
208 }
209 }
210
211 panic("No container found for removeCurrent")
212}
213
214func (w *interpolationWalker) replaceCurrent(v reflect.Value) {
215 c := w.cs[len(w.cs)-2]
216 switch c.Kind() {
217 case reflect.Map:
218 // Get the key and delete it
219 k := w.csKey[len(w.csKey)-1]
220 c.SetMapIndex(k, v)
221 }
222}
223
224func (w *interpolationWalker) splitSlice() {
225 // Get the []interface{} slice so we can do some operations on
226 // it without dealing with reflection. We'll document each step
227 // here to be clear.
228 var s []interface{}
229 raw := w.cs[len(w.cs)-1]
230 switch v := raw.Interface().(type) {
231 case []interface{}:
232 s = v
233 case []map[string]interface{}:
234 return
235 default:
236 panic("Unknown kind: " + raw.Kind().String())
237 }
238
239	// Check if we have any elements that we need to split. No split
240	// condition is currently implemented, so this always returns early.
241 split := false
242 if !split {
243 return
244 }
245
246 // Make a new result slice that is twice the capacity to fit our growth.
247 result := make([]interface{}, 0, len(s)*2)
248
249 // Go over each element of the original slice and start building up
250 // the resulting slice by splitting where we have to.
251 for _, v := range s {
252 sv, ok := v.(string)
253 if !ok {
254 // Not a string, so just set it
255 result = append(result, v)
256 continue
257 }
258
259		// A plain string with nothing to split, so set it as-is
260 result = append(result, sv)
261 }
262
263 // Our slice is now done, we have to replace the slice now
264 // with this new one that we have.
265 w.replaceCurrent(reflect.ValueOf(result))
266}
diff --git a/vendor/github.com/hashicorp/logutils/LICENSE b/vendor/github.com/hashicorp/logutils/LICENSE
new file mode 100644
index 0000000..c33dcc7
--- /dev/null
+++ b/vendor/github.com/hashicorp/logutils/LICENSE
@@ -0,0 +1,354 @@
1Mozilla Public License, version 2.0
2
31. Definitions
4
51.1. “Contributor”
6
7 means each individual or legal entity that creates, contributes to the
8 creation of, or owns Covered Software.
9
101.2. “Contributor Version”
11
12 means the combination of the Contributions of others (if any) used by a
13 Contributor and that particular Contributor’s Contribution.
14
151.3. “Contribution”
16
17 means Covered Software of a particular Contributor.
18
191.4. “Covered Software”
20
21 means Source Code Form to which the initial Contributor has attached the
22 notice in Exhibit A, the Executable Form of such Source Code Form, and
23 Modifications of such Source Code Form, in each case including portions
24 thereof.
25
261.5. “Incompatible With Secondary Licenses”
27 means
28
29 a. that the initial Contributor has attached the notice described in
30 Exhibit B to the Covered Software; or
31
32 b. that the Covered Software was made available under the terms of version
33 1.1 or earlier of the License, but not also under the terms of a
34 Secondary License.
35
361.6. “Executable Form”
37
38 means any form of the work other than Source Code Form.
39
401.7. “Larger Work”
41
42 means a work that combines Covered Software with other material, in a separate
43 file or files, that is not Covered Software.
44
451.8. “License”
46
47 means this document.
48
491.9. “Licensable”
50
51 means having the right to grant, to the maximum extent possible, whether at the
52 time of the initial grant or subsequently, any and all of the rights conveyed by
53 this License.
54
551.10. “Modifications”
56
57 means any of the following:
58
59 a. any file in Source Code Form that results from an addition to, deletion
60 from, or modification of the contents of Covered Software; or
61
62 b. any new file in Source Code Form that contains any Covered Software.
63
641.11. “Patent Claims” of a Contributor
65
66 means any patent claim(s), including without limitation, method, process,
67 and apparatus claims, in any patent Licensable by such Contributor that
68 would be infringed, but for the grant of the License, by the making,
69 using, selling, offering for sale, having made, import, or transfer of
70 either its Contributions or its Contributor Version.
71
721.12. “Secondary License”
73
74 means either the GNU General Public License, Version 2.0, the GNU Lesser
75 General Public License, Version 2.1, the GNU Affero General Public
76 License, Version 3.0, or any later versions of those licenses.
77
781.13. “Source Code Form”
79
80 means the form of the work preferred for making modifications.
81
821.14. “You” (or “Your”)
83
84 means an individual or a legal entity exercising rights under this
85 License. For legal entities, “You” includes any entity that controls, is
86 controlled by, or is under common control with You. For purposes of this
87 definition, “control” means (a) the power, direct or indirect, to cause
88 the direction or management of such entity, whether by contract or
89 otherwise, or (b) ownership of more than fifty percent (50%) of the
90 outstanding shares or beneficial ownership of such entity.
91
92
932. License Grants and Conditions
94
952.1. Grants
96
97 Each Contributor hereby grants You a world-wide, royalty-free,
98 non-exclusive license:
99
100 a. under intellectual property rights (other than patent or trademark)
101 Licensable by such Contributor to use, reproduce, make available,
102 modify, display, perform, distribute, and otherwise exploit its
103 Contributions, either on an unmodified basis, with Modifications, or as
104 part of a Larger Work; and
105
106 b. under Patent Claims of such Contributor to make, use, sell, offer for
107 sale, have made, import, and otherwise transfer either its Contributions
108 or its Contributor Version.
109
1102.2. Effective Date
111
112 The licenses granted in Section 2.1 with respect to any Contribution become
113 effective for each Contribution on the date the Contributor first distributes
114 such Contribution.
115
1162.3. Limitations on Grant Scope
117
118 The licenses granted in this Section 2 are the only rights granted under this
119 License. No additional rights or licenses will be implied from the distribution
120 or licensing of Covered Software under this License. Notwithstanding Section
121 2.1(b) above, no patent license is granted by a Contributor:
122
123 a. for any code that a Contributor has removed from Covered Software; or
124
125 b. for infringements caused by: (i) Your and any other third party’s
126 modifications of Covered Software, or (ii) the combination of its
127 Contributions with other software (except as part of its Contributor
128 Version); or
129
130 c. under Patent Claims infringed by Covered Software in the absence of its
131 Contributions.
132
133 This License does not grant any rights in the trademarks, service marks, or
134 logos of any Contributor (except as may be necessary to comply with the
135 notice requirements in Section 3.4).
136
1372.4. Subsequent Licenses
138
139 No Contributor makes additional grants as a result of Your choice to
140 distribute the Covered Software under a subsequent version of this License
141 (see Section 10.2) or under the terms of a Secondary License (if permitted
142 under the terms of Section 3.3).
143
1442.5. Representation
145
146 Each Contributor represents that the Contributor believes its Contributions
147 are its original creation(s) or it has sufficient rights to grant the
148 rights to its Contributions conveyed by this License.
149
1502.6. Fair Use
151
152 This License is not intended to limit any rights You have under applicable
153 copyright doctrines of fair use, fair dealing, or other equivalents.
154
1552.7. Conditions
156
157 Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
158 Section 2.1.
159
160
1613. Responsibilities
162
1633.1. Distribution of Source Form
164
165 All distribution of Covered Software in Source Code Form, including any
166 Modifications that You create or to which You contribute, must be under the
167 terms of this License. You must inform recipients that the Source Code Form
168 of the Covered Software is governed by the terms of this License, and how
169 they can obtain a copy of this License. You may not attempt to alter or
170 restrict the recipients’ rights in the Source Code Form.
171
1723.2. Distribution of Executable Form
173
174 If You distribute Covered Software in Executable Form then:
175
176 a. such Covered Software must also be made available in Source Code Form,
177 as described in Section 3.1, and You must inform recipients of the
178 Executable Form how they can obtain a copy of such Source Code Form by
179 reasonable means in a timely manner, at a charge no more than the cost
180 of distribution to the recipient; and
181
182 b. You may distribute such Executable Form under the terms of this License,
183 or sublicense it under different terms, provided that the license for
184 the Executable Form does not attempt to limit or alter the recipients’
185 rights in the Source Code Form under this License.
186
1873.3. Distribution of a Larger Work
188
189 You may create and distribute a Larger Work under terms of Your choice,
190 provided that You also comply with the requirements of this License for the
191 Covered Software. If the Larger Work is a combination of Covered Software
192 with a work governed by one or more Secondary Licenses, and the Covered
193 Software is not Incompatible With Secondary Licenses, this License permits
194 You to additionally distribute such Covered Software under the terms of
195 such Secondary License(s), so that the recipient of the Larger Work may, at
196 their option, further distribute the Covered Software under the terms of
197 either this License or such Secondary License(s).
198
1993.4. Notices
200
201 You may not remove or alter the substance of any license notices (including
202 copyright notices, patent notices, disclaimers of warranty, or limitations
203 of liability) contained within the Source Code Form of the Covered
204 Software, except that You may alter any license notices to the extent
205 required to remedy known factual inaccuracies.
206
2073.5. Application of Additional Terms
208
209 You may choose to offer, and to charge a fee for, warranty, support,
210 indemnity or liability obligations to one or more recipients of Covered
211 Software. However, You may do so only on Your own behalf, and not on behalf
212 of any Contributor. You must make it absolutely clear that any such
213 warranty, support, indemnity, or liability obligation is offered by You
214 alone, and You hereby agree to indemnify every Contributor for any
215 liability incurred by such Contributor as a result of warranty, support,
216 indemnity or liability terms You offer. You may include additional
217 disclaimers of warranty and limitations of liability specific to any
218 jurisdiction.
219
2204. Inability to Comply Due to Statute or Regulation
221
222 If it is impossible for You to comply with any of the terms of this License
223 with respect to some or all of the Covered Software due to statute, judicial
224 order, or regulation then You must: (a) comply with the terms of this License
225 to the maximum extent possible; and (b) describe the limitations and the code
226 they affect. Such description must be placed in a text file included with all
227 distributions of the Covered Software under this License. Except to the
228 extent prohibited by statute or regulation, such description must be
229 sufficiently detailed for a recipient of ordinary skill to be able to
230 understand it.
231
2325. Termination
233
2345.1. The rights granted under this License will terminate automatically if You
235 fail to comply with any of its terms. However, if You become compliant,
236 then the rights granted under this License from a particular Contributor
237 are reinstated (a) provisionally, unless and until such Contributor
238 explicitly and finally terminates Your grants, and (b) on an ongoing basis,
239 if such Contributor fails to notify You of the non-compliance by some
240 reasonable means prior to 60 days after You have come back into compliance.
241 Moreover, Your grants from a particular Contributor are reinstated on an
242 ongoing basis if such Contributor notifies You of the non-compliance by
243 some reasonable means, this is the first time You have received notice of
244 non-compliance with this License from such Contributor, and You become
245 compliant prior to 30 days after Your receipt of the notice.
246
2475.2. If You initiate litigation against any entity by asserting a patent
248 infringement claim (excluding declaratory judgment actions, counter-claims,
249 and cross-claims) alleging that a Contributor Version directly or
250 indirectly infringes any patent, then the rights granted to You by any and
251 all Contributors for the Covered Software under Section 2.1 of this License
252 shall terminate.
253
2545.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
255 license agreements (excluding distributors and resellers) which have been
256 validly granted by You or Your distributors under this License prior to
257 termination shall survive termination.
258
2596. Disclaimer of Warranty
260
261 Covered Software is provided under this License on an “as is” basis, without
262 warranty of any kind, either expressed, implied, or statutory, including,
263 without limitation, warranties that the Covered Software is free of defects,
264 merchantable, fit for a particular purpose or non-infringing. The entire
265 risk as to the quality and performance of the Covered Software is with You.
266 Should any Covered Software prove defective in any respect, You (not any
267 Contributor) assume the cost of any necessary servicing, repair, or
268 correction. This disclaimer of warranty constitutes an essential part of this
269 License. No use of any Covered Software is authorized under this License
270 except under this disclaimer.
271
2727. Limitation of Liability
273
274 Under no circumstances and under no legal theory, whether tort (including
275 negligence), contract, or otherwise, shall any Contributor, or anyone who
276 distributes Covered Software as permitted above, be liable to You for any
277 direct, indirect, special, incidental, or consequential damages of any
278 character including, without limitation, damages for lost profits, loss of
279 goodwill, work stoppage, computer failure or malfunction, or any and all
280 other commercial damages or losses, even if such party shall have been
281 informed of the possibility of such damages. This limitation of liability
282 shall not apply to liability for death or personal injury resulting from such
283 party’s negligence to the extent applicable law prohibits such limitation.
284 Some jurisdictions do not allow the exclusion or limitation of incidental or
285 consequential damages, so this exclusion and limitation may not apply to You.
286
2878. Litigation
288
289 Any litigation relating to this License may be brought only in the courts of
290 a jurisdiction where the defendant maintains its principal place of business
291 and such litigation shall be governed by laws of that jurisdiction, without
292 reference to its conflict-of-law provisions. Nothing in this Section shall
293 prevent a party’s ability to bring cross-claims or counter-claims.
294
2959. Miscellaneous
296
297 This License represents the complete agreement concerning the subject matter
298 hereof. If any provision of this License is held to be unenforceable, such
299 provision shall be reformed only to the extent necessary to make it
300 enforceable. Any law or regulation which provides that the language of a
301 contract shall be construed against the drafter shall not be used to construe
302 this License against a Contributor.
303
304
30510. Versions of the License
306
30710.1. New Versions
308
309 Mozilla Foundation is the license steward. Except as provided in Section
310 10.3, no one other than the license steward has the right to modify or
311 publish new versions of this License. Each version will be given a
312 distinguishing version number.
313
31410.2. Effect of New Versions
315
316 You may distribute the Covered Software under the terms of the version of
317 the License under which You originally received the Covered Software, or
318 under the terms of any subsequent version published by the license
319 steward.
320
32110.3. Modified Versions
322
323 If you create software not governed by this License, and you want to
324 create a new license for such software, you may create and use a modified
325 version of this License if you rename the license and remove any
326 references to the name of the license steward (except to note that such
327 modified license differs from this License).
328
32910.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
330 If You choose to distribute Source Code Form that is Incompatible With
331 Secondary Licenses under the terms of this version of the License, the
332 notice described in Exhibit B of this License must be attached.
333
334Exhibit A - Source Code Form License Notice
335
336 This Source Code Form is subject to the
337 terms of the Mozilla Public License, v.
338 2.0. If a copy of the MPL was not
339 distributed with this file, You can
340 obtain one at
341 http://mozilla.org/MPL/2.0/.
342
343If it is not possible or desirable to put the notice in a particular file, then
344You may include the notice in a location (such as a LICENSE file in a relevant
345directory) where a recipient would be likely to look for such a notice.
346
347You may add additional accurate notices of copyright ownership.
348
349Exhibit B - “Incompatible With Secondary Licenses” Notice
350
351 This Source Code Form is “Incompatible
352 With Secondary Licenses”, as defined by
353 the Mozilla Public License, v. 2.0.
354
diff --git a/vendor/github.com/hashicorp/logutils/README.md b/vendor/github.com/hashicorp/logutils/README.md
new file mode 100644
index 0000000..49490ea
--- /dev/null
+++ b/vendor/github.com/hashicorp/logutils/README.md
@@ -0,0 +1,36 @@
1# logutils
2
3logutils is a Go package that augments the standard library "log" package
4to make logging a bit more modern, without fragmenting the Go ecosystem
5with new logging packages.
6
7## The simplest thing that could possibly work
8
9Presumably your application already uses the default `log` package. To switch, you'll want your code to look like the following:
10
11```go
12package main
13
14import (
15 "log"
16 "os"
17
18 "github.com/hashicorp/logutils"
19)
20
21func main() {
22 filter := &logutils.LevelFilter{
23 Levels: []logutils.LogLevel{"DEBUG", "WARN", "ERROR"},
24 MinLevel: logutils.LogLevel("WARN"),
25 Writer: os.Stderr,
26 }
27 log.SetOutput(filter)
28
29 log.Print("[DEBUG] Debugging") // this will not print
30 log.Print("[WARN] Warning") // this will
31 log.Print("[ERROR] Erring") // and so will this
32 log.Print("Message I haven't updated") // and so will this
33}
34```
35
36This logs to standard error exactly like Go's standard logger. Any log messages you haven't converted to include a level will continue to print as before.
diff --git a/vendor/github.com/hashicorp/logutils/level.go b/vendor/github.com/hashicorp/logutils/level.go
new file mode 100644
index 0000000..6381bf1
--- /dev/null
+++ b/vendor/github.com/hashicorp/logutils/level.go
@@ -0,0 +1,81 @@
1// Package logutils augments the standard log package with levels.
2package logutils
3
4import (
5 "bytes"
6 "io"
7 "sync"
8)
9
10type LogLevel string
11
12// LevelFilter is an io.Writer that can be used with a logger that
13// will filter out log messages that aren't at least a certain level.
14//
15// Once the filter is in use somewhere, it is not safe to modify
16// the structure.
17type LevelFilter struct {
18 // Levels is the list of log levels, in increasing order of
19	// severity. An example might be: {"DEBUG", "WARN", "ERROR"}.
20 Levels []LogLevel
21
22 // MinLevel is the minimum level allowed through
23 MinLevel LogLevel
24
25 // The underlying io.Writer where log messages that pass the filter
26	// will be sent.
27 Writer io.Writer
28
29 badLevels map[LogLevel]struct{}
30 once sync.Once
31}
32
33// Check reports whether a given line would be passed through by the
34// level filter.
35func (f *LevelFilter) Check(line []byte) bool {
36 f.once.Do(f.init)
37
38 // Check for a log level
39 var level LogLevel
40 x := bytes.IndexByte(line, '[')
41 if x >= 0 {
42 y := bytes.IndexByte(line[x:], ']')
43 if y >= 0 {
44 level = LogLevel(line[x+1 : x+y])
45 }
46 }
47
48 _, ok := f.badLevels[level]
49 return !ok
50}
51
52func (f *LevelFilter) Write(p []byte) (n int, err error) {
53 // Note in general that io.Writer can receive any byte sequence
54 // to write, but the "log" package always guarantees that we only
55 // get a single line. We use that as a slight optimization within
56 // this method, assuming we're dealing with a single, complete line
57 // of log data.
58
59 if !f.Check(p) {
60 return len(p), nil
61 }
62
63 return f.Writer.Write(p)
64}
65
66// SetMinLevel is used to update the minimum log level
67func (f *LevelFilter) SetMinLevel(min LogLevel) {
68 f.MinLevel = min
69 f.init()
70}
71
72func (f *LevelFilter) init() {
73 badLevels := make(map[LogLevel]struct{})
74 for _, level := range f.Levels {
75 if level == f.MinLevel {
76 break
77 }
78 badLevels[level] = struct{}{}
79 }
80 f.badLevels = badLevels
81}
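
A minimal sketch of the filtering rule implemented by Check and init above: levels listed before MinLevel are rejected, everything else (including lines with no recognized level tag) passes, and SetMinLevel rebuilds the filter at runtime.

```go
package main

import (
	"fmt"
	"os"

	"github.com/hashicorp/logutils"
)

func main() {
	filter := &logutils.LevelFilter{
		Levels:   []logutils.LogLevel{"DEBUG", "WARN", "ERROR"},
		MinLevel: logutils.LogLevel("WARN"),
		Writer:   os.Stderr,
	}

	fmt.Println(filter.Check([]byte("[DEBUG] noisy")))  // false
	fmt.Println(filter.Check([]byte("[WARN] careful"))) // true
	fmt.Println(filter.Check([]byte("no level tag")))   // true

	// Raising the minimum rebuilds the set of rejected levels.
	filter.SetMinLevel(logutils.LogLevel("ERROR"))
	fmt.Println(filter.Check([]byte("[WARN] careful"))) // false
}
```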
diff --git a/vendor/github.com/hashicorp/terraform/LICENSE b/vendor/github.com/hashicorp/terraform/LICENSE
new file mode 100644
index 0000000..c33dcc7
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/LICENSE
@@ -0,0 +1,354 @@
1Mozilla Public License, version 2.0
2
31. Definitions
4
51.1. “Contributor”
6
7 means each individual or legal entity that creates, contributes to the
8 creation of, or owns Covered Software.
9
101.2. “Contributor Version”
11
12 means the combination of the Contributions of others (if any) used by a
13 Contributor and that particular Contributor’s Contribution.
14
151.3. “Contribution”
16
17 means Covered Software of a particular Contributor.
18
191.4. “Covered Software”
20
21 means Source Code Form to which the initial Contributor has attached the
22 notice in Exhibit A, the Executable Form of such Source Code Form, and
23 Modifications of such Source Code Form, in each case including portions
24 thereof.
25
261.5. “Incompatible With Secondary Licenses”
27 means
28
29 a. that the initial Contributor has attached the notice described in
30 Exhibit B to the Covered Software; or
31
32 b. that the Covered Software was made available under the terms of version
33 1.1 or earlier of the License, but not also under the terms of a
34 Secondary License.
35
361.6. “Executable Form”
37
38 means any form of the work other than Source Code Form.
39
401.7. “Larger Work”
41
42 means a work that combines Covered Software with other material, in a separate
43 file or files, that is not Covered Software.
44
451.8. “License”
46
47 means this document.
48
491.9. “Licensable”
50
51 means having the right to grant, to the maximum extent possible, whether at the
52 time of the initial grant or subsequently, any and all of the rights conveyed by
53 this License.
54
551.10. “Modifications”
56
57 means any of the following:
58
59 a. any file in Source Code Form that results from an addition to, deletion
60 from, or modification of the contents of Covered Software; or
61
62 b. any new file in Source Code Form that contains any Covered Software.
63
641.11. “Patent Claims” of a Contributor
65
66 means any patent claim(s), including without limitation, method, process,
67 and apparatus claims, in any patent Licensable by such Contributor that
68 would be infringed, but for the grant of the License, by the making,
69 using, selling, offering for sale, having made, import, or transfer of
70 either its Contributions or its Contributor Version.
71
721.12. “Secondary License”
73
74 means either the GNU General Public License, Version 2.0, the GNU Lesser
75 General Public License, Version 2.1, the GNU Affero General Public
76 License, Version 3.0, or any later versions of those licenses.
77
781.13. “Source Code Form”
79
80 means the form of the work preferred for making modifications.
81
821.14. “You” (or “Your”)
83
84 means an individual or a legal entity exercising rights under this
85 License. For legal entities, “You” includes any entity that controls, is
86 controlled by, or is under common control with You. For purposes of this
87 definition, “control” means (a) the power, direct or indirect, to cause
88 the direction or management of such entity, whether by contract or
89 otherwise, or (b) ownership of more than fifty percent (50%) of the
90 outstanding shares or beneficial ownership of such entity.
91
92
932. License Grants and Conditions
94
952.1. Grants
96
97 Each Contributor hereby grants You a world-wide, royalty-free,
98 non-exclusive license:
99
100 a. under intellectual property rights (other than patent or trademark)
101 Licensable by such Contributor to use, reproduce, make available,
102 modify, display, perform, distribute, and otherwise exploit its
103 Contributions, either on an unmodified basis, with Modifications, or as
104 part of a Larger Work; and
105
106 b. under Patent Claims of such Contributor to make, use, sell, offer for
107 sale, have made, import, and otherwise transfer either its Contributions
108 or its Contributor Version.
109
1102.2. Effective Date
111
112 The licenses granted in Section 2.1 with respect to any Contribution become
113 effective for each Contribution on the date the Contributor first distributes
114 such Contribution.
115
1162.3. Limitations on Grant Scope
117
118 The licenses granted in this Section 2 are the only rights granted under this
119 License. No additional rights or licenses will be implied from the distribution
120 or licensing of Covered Software under this License. Notwithstanding Section
121 2.1(b) above, no patent license is granted by a Contributor:
122
123 a. for any code that a Contributor has removed from Covered Software; or
124
125 b. for infringements caused by: (i) Your and any other third party’s
126 modifications of Covered Software, or (ii) the combination of its
127 Contributions with other software (except as part of its Contributor
128 Version); or
129
130 c. under Patent Claims infringed by Covered Software in the absence of its
131 Contributions.
132
133 This License does not grant any rights in the trademarks, service marks, or
134 logos of any Contributor (except as may be necessary to comply with the
135 notice requirements in Section 3.4).
136
1372.4. Subsequent Licenses
138
139 No Contributor makes additional grants as a result of Your choice to
140 distribute the Covered Software under a subsequent version of this License
141 (see Section 10.2) or under the terms of a Secondary License (if permitted
142 under the terms of Section 3.3).
143
1442.5. Representation
145
146 Each Contributor represents that the Contributor believes its Contributions
147 are its original creation(s) or it has sufficient rights to grant the
148 rights to its Contributions conveyed by this License.
149
1502.6. Fair Use
151
152 This License is not intended to limit any rights You have under applicable
153 copyright doctrines of fair use, fair dealing, or other equivalents.
154
1552.7. Conditions
156
157 Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
158 Section 2.1.
159
160
1613. Responsibilities
162
1633.1. Distribution of Source Form
164
165 All distribution of Covered Software in Source Code Form, including any
166 Modifications that You create or to which You contribute, must be under the
167 terms of this License. You must inform recipients that the Source Code Form
168 of the Covered Software is governed by the terms of this License, and how
169 they can obtain a copy of this License. You may not attempt to alter or
170 restrict the recipients’ rights in the Source Code Form.
171
1723.2. Distribution of Executable Form
173
174 If You distribute Covered Software in Executable Form then:
175
176 a. such Covered Software must also be made available in Source Code Form,
177 as described in Section 3.1, and You must inform recipients of the
178 Executable Form how they can obtain a copy of such Source Code Form by
179 reasonable means in a timely manner, at a charge no more than the cost
180 of distribution to the recipient; and
181
182 b. You may distribute such Executable Form under the terms of this License,
183 or sublicense it under different terms, provided that the license for
184 the Executable Form does not attempt to limit or alter the recipients’
185 rights in the Source Code Form under this License.
186
1873.3. Distribution of a Larger Work
188
189 You may create and distribute a Larger Work under terms of Your choice,
190 provided that You also comply with the requirements of this License for the
191 Covered Software. If the Larger Work is a combination of Covered Software
192 with a work governed by one or more Secondary Licenses, and the Covered
193 Software is not Incompatible With Secondary Licenses, this License permits
194 You to additionally distribute such Covered Software under the terms of
195 such Secondary License(s), so that the recipient of the Larger Work may, at
196 their option, further distribute the Covered Software under the terms of
197 either this License or such Secondary License(s).
198
1993.4. Notices
200
201 You may not remove or alter the substance of any license notices (including
202 copyright notices, patent notices, disclaimers of warranty, or limitations
203 of liability) contained within the Source Code Form of the Covered
204 Software, except that You may alter any license notices to the extent
205 required to remedy known factual inaccuracies.
206
2073.5. Application of Additional Terms
208
209 You may choose to offer, and to charge a fee for, warranty, support,
210 indemnity or liability obligations to one or more recipients of Covered
211 Software. However, You may do so only on Your own behalf, and not on behalf
212 of any Contributor. You must make it absolutely clear that any such
213 warranty, support, indemnity, or liability obligation is offered by You
214 alone, and You hereby agree to indemnify every Contributor for any
215 liability incurred by such Contributor as a result of warranty, support,
216 indemnity or liability terms You offer. You may include additional
217 disclaimers of warranty and limitations of liability specific to any
218 jurisdiction.
219
2204. Inability to Comply Due to Statute or Regulation
221
222 If it is impossible for You to comply with any of the terms of this License
223 with respect to some or all of the Covered Software due to statute, judicial
224 order, or regulation then You must: (a) comply with the terms of this License
225 to the maximum extent possible; and (b) describe the limitations and the code
226 they affect. Such description must be placed in a text file included with all
227 distributions of the Covered Software under this License. Except to the
228 extent prohibited by statute or regulation, such description must be
229 sufficiently detailed for a recipient of ordinary skill to be able to
230 understand it.
231
2325. Termination
233
2345.1. The rights granted under this License will terminate automatically if You
235 fail to comply with any of its terms. However, if You become compliant,
236 then the rights granted under this License from a particular Contributor
237 are reinstated (a) provisionally, unless and until such Contributor
238 explicitly and finally terminates Your grants, and (b) on an ongoing basis,
239 if such Contributor fails to notify You of the non-compliance by some
240 reasonable means prior to 60 days after You have come back into compliance.
241 Moreover, Your grants from a particular Contributor are reinstated on an
242 ongoing basis if such Contributor notifies You of the non-compliance by
243 some reasonable means, this is the first time You have received notice of
244 non-compliance with this License from such Contributor, and You become
245 compliant prior to 30 days after Your receipt of the notice.
246
2475.2. If You initiate litigation against any entity by asserting a patent
248 infringement claim (excluding declaratory judgment actions, counter-claims,
249 and cross-claims) alleging that a Contributor Version directly or
250 indirectly infringes any patent, then the rights granted to You by any and
251 all Contributors for the Covered Software under Section 2.1 of this License
252 shall terminate.
253
2545.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
255 license agreements (excluding distributors and resellers) which have been
256 validly granted by You or Your distributors under this License prior to
257 termination shall survive termination.
258
2596. Disclaimer of Warranty
260
261 Covered Software is provided under this License on an “as is” basis, without
262 warranty of any kind, either expressed, implied, or statutory, including,
263 without limitation, warranties that the Covered Software is free of defects,
264 merchantable, fit for a particular purpose or non-infringing. The entire
265 risk as to the quality and performance of the Covered Software is with You.
266 Should any Covered Software prove defective in any respect, You (not any
267 Contributor) assume the cost of any necessary servicing, repair, or
268 correction. This disclaimer of warranty constitutes an essential part of this
269 License. No use of any Covered Software is authorized under this License
270 except under this disclaimer.
271
2727. Limitation of Liability
273
274 Under no circumstances and under no legal theory, whether tort (including
275 negligence), contract, or otherwise, shall any Contributor, or anyone who
276 distributes Covered Software as permitted above, be liable to You for any
277 direct, indirect, special, incidental, or consequential damages of any
278 character including, without limitation, damages for lost profits, loss of
279 goodwill, work stoppage, computer failure or malfunction, or any and all
280 other commercial damages or losses, even if such party shall have been
281 informed of the possibility of such damages. This limitation of liability
282 shall not apply to liability for death or personal injury resulting from such
283 party’s negligence to the extent applicable law prohibits such limitation.
284 Some jurisdictions do not allow the exclusion or limitation of incidental or
285 consequential damages, so this exclusion and limitation may not apply to You.
286
2878. Litigation
288
289 Any litigation relating to this License may be brought only in the courts of
290 a jurisdiction where the defendant maintains its principal place of business
291 and such litigation shall be governed by laws of that jurisdiction, without
292 reference to its conflict-of-law provisions. Nothing in this Section shall
293 prevent a party’s ability to bring cross-claims or counter-claims.
294
2959. Miscellaneous
296
297 This License represents the complete agreement concerning the subject matter
298 hereof. If any provision of this License is held to be unenforceable, such
299 provision shall be reformed only to the extent necessary to make it
300 enforceable. Any law or regulation which provides that the language of a
301 contract shall be construed against the drafter shall not be used to construe
302 this License against a Contributor.
303
304
30510. Versions of the License
306
30710.1. New Versions
308
309 Mozilla Foundation is the license steward. Except as provided in Section
310 10.3, no one other than the license steward has the right to modify or
311 publish new versions of this License. Each version will be given a
312 distinguishing version number.
313
31410.2. Effect of New Versions
315
316 You may distribute the Covered Software under the terms of the version of
317 the License under which You originally received the Covered Software, or
318 under the terms of any subsequent version published by the license
319 steward.
320
32110.3. Modified Versions
322
323 If you create software not governed by this License, and you want to
324 create a new license for such software, you may create and use a modified
325 version of this License if you rename the license and remove any
326 references to the name of the license steward (except to note that such
327 modified license differs from this License).
328
32910.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
330 If You choose to distribute Source Code Form that is Incompatible With
331 Secondary Licenses under the terms of this version of the License, the
332 notice described in Exhibit B of this License must be attached.
333
334Exhibit A - Source Code Form License Notice
335
336 This Source Code Form is subject to the
337 terms of the Mozilla Public License, v.
338 2.0. If a copy of the MPL was not
339 distributed with this file, You can
340 obtain one at
341 http://mozilla.org/MPL/2.0/.
342
343If it is not possible or desirable to put the notice in a particular file, then
344You may include the notice in a location (such as a LICENSE file in a relevant
345directory) where a recipient would be likely to look for such a notice.
346
347You may add additional accurate notices of copyright ownership.
348
349Exhibit B - “Incompatible With Secondary Licenses” Notice
350
351 This Source Code Form is “Incompatible
352 With Secondary Licenses”, as defined by
353 the Mozilla Public License, v. 2.0.
354
diff --git a/vendor/github.com/hashicorp/terraform/config/append.go b/vendor/github.com/hashicorp/terraform/config/append.go
new file mode 100644
index 0000000..5f4e89e
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/append.go
@@ -0,0 +1,86 @@
1package config
2
3// Append appends one configuration to another.
4//
5// Append assumes that both configurations will not have
6// conflicting variables, resources, etc. If they do, the
7// problems will be caught in the validation phase.
8//
9// It is possible that c1, c2 on their own are not valid. For
10// example, a resource in c2 may reference a variable in c1. But
11// together, they would be valid.
12func Append(c1, c2 *Config) (*Config, error) {
13 c := new(Config)
14
15 // Append unknown keys, but keep them unique since it is a set
16 unknowns := make(map[string]struct{})
17 for _, k := range c1.unknownKeys {
18 _, present := unknowns[k]
19 if !present {
20 unknowns[k] = struct{}{}
21 c.unknownKeys = append(c.unknownKeys, k)
22 }
23 }
24
25 for _, k := range c2.unknownKeys {
26 _, present := unknowns[k]
27 if !present {
28 unknowns[k] = struct{}{}
29 c.unknownKeys = append(c.unknownKeys, k)
30 }
31 }
32
33 c.Atlas = c1.Atlas
34 if c2.Atlas != nil {
35 c.Atlas = c2.Atlas
36 }
37
38 // merge Terraform blocks
39 if c1.Terraform != nil {
40 c.Terraform = c1.Terraform
41 if c2.Terraform != nil {
42 c.Terraform.Merge(c2.Terraform)
43 }
44 } else {
45 c.Terraform = c2.Terraform
46 }
47
48 if len(c1.Modules) > 0 || len(c2.Modules) > 0 {
49 c.Modules = make(
50 []*Module, 0, len(c1.Modules)+len(c2.Modules))
51 c.Modules = append(c.Modules, c1.Modules...)
52 c.Modules = append(c.Modules, c2.Modules...)
53 }
54
55 if len(c1.Outputs) > 0 || len(c2.Outputs) > 0 {
56 c.Outputs = make(
57 []*Output, 0, len(c1.Outputs)+len(c2.Outputs))
58 c.Outputs = append(c.Outputs, c1.Outputs...)
59 c.Outputs = append(c.Outputs, c2.Outputs...)
60 }
61
62 if len(c1.ProviderConfigs) > 0 || len(c2.ProviderConfigs) > 0 {
63 c.ProviderConfigs = make(
64 []*ProviderConfig,
65 0, len(c1.ProviderConfigs)+len(c2.ProviderConfigs))
66 c.ProviderConfigs = append(c.ProviderConfigs, c1.ProviderConfigs...)
67 c.ProviderConfigs = append(c.ProviderConfigs, c2.ProviderConfigs...)
68 }
69
70 if len(c1.Resources) > 0 || len(c2.Resources) > 0 {
71 c.Resources = make(
72 []*Resource,
73 0, len(c1.Resources)+len(c2.Resources))
74 c.Resources = append(c.Resources, c1.Resources...)
75 c.Resources = append(c.Resources, c2.Resources...)
76 }
77
78 if len(c1.Variables) > 0 || len(c2.Variables) > 0 {
79 c.Variables = make(
80 []*Variable, 0, len(c1.Variables)+len(c2.Variables))
81 c.Variables = append(c.Variables, c1.Variables...)
82 c.Variables = append(c.Variables, c2.Variables...)
83 }
84
85 return c, nil
86}
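
A minimal, hypothetical in-package sketch of Append's behavior: slice fields are concatenated in order, while singleton fields such as Atlas prefer c2 when both are set.

```go
package config

// appendExample is a hypothetical helper, not part of the vendored file.
func appendExample() (*Config, error) {
	c1 := &Config{
		Variables: []*Variable{{Name: "region"}},
	}
	c2 := &Config{
		Variables: []*Variable{{Name: "ami"}},
		Resources: []*Resource{{Name: "web", Type: "aws_instance"}},
	}

	// c.Variables holds "region" then "ami"; c.Resources holds "web".
	return Append(c1, c2)
}
```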
diff --git a/vendor/github.com/hashicorp/terraform/config/config.go b/vendor/github.com/hashicorp/terraform/config/config.go
new file mode 100644
index 0000000..9a764ac
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/config.go
@@ -0,0 +1,1096 @@
1// Package config is responsible for loading and validating the
2// configuration.
3package config
4
5import (
6 "fmt"
7 "regexp"
8 "strconv"
9 "strings"
10
11 "github.com/hashicorp/go-multierror"
12 "github.com/hashicorp/hil"
13 "github.com/hashicorp/hil/ast"
14 "github.com/hashicorp/terraform/helper/hilmapstructure"
15 "github.com/mitchellh/reflectwalk"
16)
17
18// NameRegexp is the regular expression that all names (modules, providers,
19// resources, etc.) must follow.
20var NameRegexp = regexp.MustCompile(`(?i)\A[A-Z0-9_][A-Z0-9\-\_]*\z`)
21
22// Config is the configuration that comes from loading a collection
23// of Terraform templates.
24type Config struct {
25 // Dir is the path to the directory where this configuration was
26 // loaded from. If it is blank, this configuration wasn't loaded from
27 // any meaningful directory.
28 Dir string
29
30 Terraform *Terraform
31 Atlas *AtlasConfig
32 Modules []*Module
33 ProviderConfigs []*ProviderConfig
34 Resources []*Resource
35 Variables []*Variable
36 Outputs []*Output
37
38 // The fields below can be filled in by loaders for validation
39 // purposes.
40 unknownKeys []string
41}
42
43// AtlasConfig is the configuration for building in HashiCorp's Atlas.
44type AtlasConfig struct {
45 Name string
46 Include []string
47 Exclude []string
48}
49
50// Module is a module used within a configuration.
51//
52// This does not represent a module itself; rather, it represents a
53// module call-site within an existing configuration.
54type Module struct {
55 Name string
56 Source string
57 RawConfig *RawConfig
58}
59
60// ProviderConfig is the configuration for a resource provider.
61//
62// For example, Terraform needs to set the AWS access keys for the AWS
63// resource provider.
64type ProviderConfig struct {
65 Name string
66 Alias string
67 RawConfig *RawConfig
68}
69
70// A Resource represents a single Terraform resource in the configuration.
71// A Terraform resource is something that supports some or all of the
72// usual "create, read, update, delete" operations, depending on
73// the given Mode.
74type Resource struct {
75 Mode ResourceMode // which operations the resource supports
76 Name string
77 Type string
78 RawCount *RawConfig
79 RawConfig *RawConfig
80 Provisioners []*Provisioner
81 Provider string
82 DependsOn []string
83 Lifecycle ResourceLifecycle
84}
85
86// Copy returns a copy of this Resource. Helpful for avoiding shared
87// config pointers across multiple pieces of the graph that need to do
88// interpolation.
89func (r *Resource) Copy() *Resource {
90 n := &Resource{
91 Mode: r.Mode,
92 Name: r.Name,
93 Type: r.Type,
94 RawCount: r.RawCount.Copy(),
95 RawConfig: r.RawConfig.Copy(),
96 Provisioners: make([]*Provisioner, 0, len(r.Provisioners)),
97 Provider: r.Provider,
98 DependsOn: make([]string, len(r.DependsOn)),
99 Lifecycle: *r.Lifecycle.Copy(),
100 }
101 for _, p := range r.Provisioners {
102 n.Provisioners = append(n.Provisioners, p.Copy())
103 }
104 copy(n.DependsOn, r.DependsOn)
105 return n
106}
107
108// ResourceLifecycle is used to store the lifecycle tuning parameters
109// to allow customized behavior
110type ResourceLifecycle struct {
111 CreateBeforeDestroy bool `mapstructure:"create_before_destroy"`
112 PreventDestroy bool `mapstructure:"prevent_destroy"`
113 IgnoreChanges []string `mapstructure:"ignore_changes"`
114}
115
116// Copy returns a copy of this ResourceLifecycle
117func (r *ResourceLifecycle) Copy() *ResourceLifecycle {
118 n := &ResourceLifecycle{
119 CreateBeforeDestroy: r.CreateBeforeDestroy,
120 PreventDestroy: r.PreventDestroy,
121 IgnoreChanges: make([]string, len(r.IgnoreChanges)),
122 }
123 copy(n.IgnoreChanges, r.IgnoreChanges)
124 return n
125}
126
127// Provisioner is a configured provisioner step on a resource.
128type Provisioner struct {
129 Type string
130 RawConfig *RawConfig
131 ConnInfo *RawConfig
132
133 When ProvisionerWhen
134 OnFailure ProvisionerOnFailure
135}
136
137// Copy returns a copy of this Provisioner
138func (p *Provisioner) Copy() *Provisioner {
139 return &Provisioner{
140 Type: p.Type,
141 RawConfig: p.RawConfig.Copy(),
142 ConnInfo: p.ConnInfo.Copy(),
143 When: p.When,
144 OnFailure: p.OnFailure,
145 }
146}
147
148// Variable is a variable defined within the configuration.
149type Variable struct {
150 Name string
151 DeclaredType string `mapstructure:"type"`
152 Default interface{}
153 Description string
154}
155
156// Output is an output defined within the configuration. An output is
157// resulting data that Terraform highlights when it finishes. An
158// output marked Sensitive will be displayed in a masked form following
159// application, but will still be available in state.
160type Output struct {
161 Name string
162 DependsOn []string
163 Description string
164 Sensitive bool
165 RawConfig *RawConfig
166}
167
168// VariableType is the type of value a variable holds, as returned
169// by the Type() function on variables.
170type VariableType byte
171
172const (
173 VariableTypeUnknown VariableType = iota
174 VariableTypeString
175 VariableTypeList
176 VariableTypeMap
177)
178
179func (v VariableType) Printable() string {
180 switch v {
181 case VariableTypeString:
182 return "string"
183 case VariableTypeMap:
184 return "map"
185 case VariableTypeList:
186 return "list"
187 default:
188 return "unknown"
189 }
190}
191
192// ProviderConfigName returns the name of the provider configuration
193// from the given set whose name is the longest prefix of the resource
194// type, i.e. the proper provider configuration for this resource.
195func ProviderConfigName(t string, pcs []*ProviderConfig) string {
196 lk := ""
197 for _, v := range pcs {
198 k := v.Name
199 if strings.HasPrefix(t, k) && len(k) > len(lk) {
200 lk = k
201 }
202 }
203
204 return lk
205}
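
A minimal sketch of the longest-prefix rule above, with the provider lookup reduced to plain strings (the provider and type names are hypothetical):

package main

import (
	"fmt"
	"strings"
)

// providerConfigName mirrors the loop in config.ProviderConfigName:
// the provider whose name is the longest prefix of the type wins.
func providerConfigName(t string, names []string) string {
	lk := ""
	for _, k := range names {
		if strings.HasPrefix(t, k) && len(k) > len(lk) {
			lk = k
		}
	}
	return lk
}

func main() {
	fmt.Println(providerConfigName("aws_instance", []string{"aws", "google"})) // "aws"
}
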
206
207// Id returns a unique identifier for this module.
208func (r *Module) Id() string {
209	return r.Name
210}
211
212// Count returns the count of this resource.
213func (r *Resource) Count() (int, error) {
214 raw := r.RawCount.Value()
215	count, ok := raw.(string)
216 if !ok {
217 return 0, fmt.Errorf(
218 "expected count to be a string or int, got %T", raw)
219 }
220
221 v, err := strconv.ParseInt(count, 0, 0)
222 if err != nil {
223 return 0, err
224 }
225
226 return int(v), nil
227}
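
Note that Count parses with strconv.ParseInt using base 0, so the count string may take any Go integer literal form. A small sketch of that parsing behavior:

package main

import (
	"fmt"
	"strconv"
)

func main() {
	// Base 0 infers the base from the literal prefix, which is the
	// behavior Resource.Count inherits: "0x10" is 16 and "010" is 8.
	for _, s := range []string{"5", "0x10", "010"} {
		v, err := strconv.ParseInt(s, 0, 0)
		fmt.Println(s, v, err)
	}
}
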
228
229// Id returns a unique identifier for this resource.
230func (r *Resource) Id() string {
231 switch r.Mode {
232 case ManagedResourceMode:
233 return fmt.Sprintf("%s.%s", r.Type, r.Name)
234 case DataResourceMode:
235 return fmt.Sprintf("data.%s.%s", r.Type, r.Name)
236 default:
237 panic(fmt.Errorf("unknown resource mode %s", r.Mode))
238 }
239}
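
A short usage sketch of the two ID forms (the resource types and names below are made up):

package main

import (
	"fmt"

	"github.com/hashicorp/terraform/config"
)

func main() {
	managed := &config.Resource{Mode: config.ManagedResourceMode, Type: "aws_instance", Name: "web"}
	data := &config.Resource{Mode: config.DataResourceMode, Type: "aws_ami", Name: "ubuntu"}

	fmt.Println(managed.Id()) // aws_instance.web
	fmt.Println(data.Id())    // data.aws_ami.ubuntu
}
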
240
241// Validate does some basic semantic checking of the configuration.
242func (c *Config) Validate() error {
243 if c == nil {
244 return nil
245 }
246
247 var errs []error
248
249 for _, k := range c.unknownKeys {
250 errs = append(errs, fmt.Errorf(
251 "Unknown root level key: %s", k))
252 }
253
254 // Validate the Terraform config
255 if tf := c.Terraform; tf != nil {
256		errs = append(errs, tf.Validate()...)
257 }
258
259 vars := c.InterpolatedVariables()
260 varMap := make(map[string]*Variable)
261 for _, v := range c.Variables {
262 if _, ok := varMap[v.Name]; ok {
263 errs = append(errs, fmt.Errorf(
264 "Variable '%s': duplicate found. Variable names must be unique.",
265 v.Name))
266 }
267
268 varMap[v.Name] = v
269 }
270
271	for k := range varMap {
272 if !NameRegexp.MatchString(k) {
273 errs = append(errs, fmt.Errorf(
274				"variable %q: variable name must match regular expression %s",
275 k, NameRegexp))
276 }
277 }
278
279 for _, v := range c.Variables {
280 if v.Type() == VariableTypeUnknown {
281 errs = append(errs, fmt.Errorf(
282				"Variable '%s': must be a string, list or map",
283 v.Name))
284 continue
285 }
286
287 interp := false
288 fn := func(n ast.Node) (interface{}, error) {
289 // LiteralNode is a literal string (outside of a ${ ... } sequence).
290			// interpolationWalker skips most of these, but in particular it
291			// visits those that have escaped sequences (like $${foo}) as a
292			// signal that *some* processing is required on this string. For
293			// our purposes here, though, this is fine and not an interpolation.
294 if _, ok := n.(*ast.LiteralNode); !ok {
295 interp = true
296 }
297 return "", nil
298 }
299
300 w := &interpolationWalker{F: fn}
301 if v.Default != nil {
302 if err := reflectwalk.Walk(v.Default, w); err == nil {
303 if interp {
304 errs = append(errs, fmt.Errorf(
305 "Variable '%s': cannot contain interpolations",
306 v.Name))
307 }
308 }
309 }
310 }
311
312 // Check for references to user variables that do not actually
313 // exist and record those errors.
314 for source, vs := range vars {
315 for _, v := range vs {
316 uv, ok := v.(*UserVariable)
317 if !ok {
318 continue
319 }
320
321 if _, ok := varMap[uv.Name]; !ok {
322 errs = append(errs, fmt.Errorf(
323 "%s: unknown variable referenced: '%s'. define it with 'variable' blocks",
324 source,
325 uv.Name))
326 }
327 }
328 }
329
330 // Check that all count variables are valid.
331 for source, vs := range vars {
332 for _, rawV := range vs {
333 switch v := rawV.(type) {
334 case *CountVariable:
335 if v.Type == CountValueInvalid {
336 errs = append(errs, fmt.Errorf(
337 "%s: invalid count variable: %s",
338 source,
339 v.FullKey()))
340 }
341 case *PathVariable:
342 if v.Type == PathValueInvalid {
343 errs = append(errs, fmt.Errorf(
344 "%s: invalid path variable: %s",
345 source,
346 v.FullKey()))
347 }
348 }
349 }
350 }
351
352 // Check that providers aren't declared multiple times.
353 providerSet := make(map[string]struct{})
354 for _, p := range c.ProviderConfigs {
355 name := p.FullName()
356 if _, ok := providerSet[name]; ok {
357 errs = append(errs, fmt.Errorf(
358 "provider.%s: declared multiple times, you can only declare a provider once",
359 name))
360 continue
361 }
362
363 providerSet[name] = struct{}{}
364 }
365
366 // Check that all references to modules are valid
367 modules := make(map[string]*Module)
368 dupped := make(map[string]struct{})
369 for _, m := range c.Modules {
370 // Check for duplicates
371 if _, ok := modules[m.Id()]; ok {
372 if _, ok := dupped[m.Id()]; !ok {
373 dupped[m.Id()] = struct{}{}
374
375 errs = append(errs, fmt.Errorf(
376 "%s: module repeated multiple times",
377 m.Id()))
378 }
379
380 // Already seen this module, just skip it
381 continue
382 }
383
384 modules[m.Id()] = m
385
386 // Check that the source has no interpolations
387 rc, err := NewRawConfig(map[string]interface{}{
388 "root": m.Source,
389 })
390 if err != nil {
391 errs = append(errs, fmt.Errorf(
392 "%s: module source error: %s",
393 m.Id(), err))
394 } else if len(rc.Interpolations) > 0 {
395 errs = append(errs, fmt.Errorf(
396 "%s: module source cannot contain interpolations",
397 m.Id()))
398 }
399
400 // Check that the name matches our regexp
401 if !NameRegexp.Match([]byte(m.Name)) {
402 errs = append(errs, fmt.Errorf(
403 "%s: module name can only contain letters, numbers, "+
404 "dashes, and underscores",
405 m.Id()))
406 }
407
408 // Check that the configuration can all be strings, lists or maps
409 raw := make(map[string]interface{})
410 for k, v := range m.RawConfig.Raw {
411 var strVal string
412 if err := hilmapstructure.WeakDecode(v, &strVal); err == nil {
413 raw[k] = strVal
414 continue
415 }
416
417 var mapVal map[string]interface{}
418 if err := hilmapstructure.WeakDecode(v, &mapVal); err == nil {
419 raw[k] = mapVal
420 continue
421 }
422
423 var sliceVal []interface{}
424 if err := hilmapstructure.WeakDecode(v, &sliceVal); err == nil {
425 raw[k] = sliceVal
426 continue
427 }
428
429 errs = append(errs, fmt.Errorf(
430 "%s: variable %s must be a string, list or map value",
431 m.Id(), k))
432 }
433
434 // Check for invalid count variables
435 for _, v := range m.RawConfig.Variables {
436 switch v.(type) {
437 case *CountVariable:
438 errs = append(errs, fmt.Errorf(
439 "%s: count variables are only valid within resources", m.Name))
440 case *SelfVariable:
441 errs = append(errs, fmt.Errorf(
442 "%s: self variables are only valid within resources", m.Name))
443 }
444 }
445
446 // Update the raw configuration to only contain the string values
447 m.RawConfig, err = NewRawConfig(raw)
448 if err != nil {
449 errs = append(errs, fmt.Errorf(
450 "%s: can't initialize configuration: %s",
451 m.Id(), err))
452 }
453 }
454 dupped = nil
455
456 // Check that all variables for modules reference modules that
457 // exist.
458 for source, vs := range vars {
459 for _, v := range vs {
460 mv, ok := v.(*ModuleVariable)
461 if !ok {
462 continue
463 }
464
465 if _, ok := modules[mv.Name]; !ok {
466 errs = append(errs, fmt.Errorf(
467 "%s: unknown module referenced: %s",
468 source,
469 mv.Name))
470 }
471 }
472 }
473
474 // Check that all references to resources are valid
475 resources := make(map[string]*Resource)
476 dupped = make(map[string]struct{})
477 for _, r := range c.Resources {
478 if _, ok := resources[r.Id()]; ok {
479 if _, ok := dupped[r.Id()]; !ok {
480 dupped[r.Id()] = struct{}{}
481
482 errs = append(errs, fmt.Errorf(
483 "%s: resource repeated multiple times",
484 r.Id()))
485 }
486 }
487
488 resources[r.Id()] = r
489 }
490 dupped = nil
491
492 // Validate resources
493 for n, r := range resources {
494 // Verify count variables
495 for _, v := range r.RawCount.Variables {
496 switch v.(type) {
497 case *CountVariable:
498 errs = append(errs, fmt.Errorf(
499 "%s: resource count can't reference count variable: %s",
500 n,
501 v.FullKey()))
502 case *SimpleVariable:
503 errs = append(errs, fmt.Errorf(
504 "%s: resource count can't reference variable: %s",
505 n,
506 v.FullKey()))
507
508 // Good
509 case *ModuleVariable:
510 case *ResourceVariable:
511 case *TerraformVariable:
512 case *UserVariable:
513
514 default:
515 errs = append(errs, fmt.Errorf(
516 "Internal error. Unknown type in count var in %s: %T",
517 n, v))
518 }
519 }
520
521		// Interpolate with a fixed number to verify that it's a number.
522 r.RawCount.interpolate(func(root ast.Node) (interface{}, error) {
523 // Execute the node but transform the AST so that it returns
524 // a fixed value of "5" for all interpolations.
525 result, err := hil.Eval(
526 hil.FixedValueTransform(
527 root, &ast.LiteralNode{Value: "5", Typex: ast.TypeString}),
528 nil)
529 if err != nil {
530 return "", err
531 }
532
533 return result.Value, nil
534 })
535 _, err := strconv.ParseInt(r.RawCount.Value().(string), 0, 0)
536 if err != nil {
537 errs = append(errs, fmt.Errorf(
538 "%s: resource count must be an integer",
539 n))
540 }
541 r.RawCount.init()
542
543 // Validate DependsOn
544 errs = append(errs, c.validateDependsOn(n, r.DependsOn, resources, modules)...)
545
546 // Verify provisioners
547 for _, p := range r.Provisioners {
548			// This validation checks that there are no splat variables
549			// referencing the resource itself. This currently is not allowed.
550
551 for _, v := range p.ConnInfo.Variables {
552 rv, ok := v.(*ResourceVariable)
553 if !ok {
554 continue
555 }
556
557 if rv.Multi && rv.Index == -1 && rv.Type == r.Type && rv.Name == r.Name {
558 errs = append(errs, fmt.Errorf(
559 "%s: connection info cannot contain splat variable "+
560 "referencing itself", n))
561 break
562 }
563 }
564
565 for _, v := range p.RawConfig.Variables {
566 rv, ok := v.(*ResourceVariable)
567 if !ok {
568 continue
569 }
570
571 if rv.Multi && rv.Index == -1 && rv.Type == r.Type && rv.Name == r.Name {
572					errs = append(errs, fmt.Errorf(
573						"%s: resource config cannot contain splat variable "+
574							"referencing itself", n))
575 break
576 }
577 }
578
579			// Check for invalid when/onFailure values; though this should be
580			// picked up by the loader, we check here just in case.
581 if p.When == ProvisionerWhenInvalid {
582 errs = append(errs, fmt.Errorf(
583 "%s: provisioner 'when' value is invalid", n))
584 }
585 if p.OnFailure == ProvisionerOnFailureInvalid {
586 errs = append(errs, fmt.Errorf(
587 "%s: provisioner 'on_failure' value is invalid", n))
588 }
589 }
590
591 // Verify ignore_changes contains valid entries
592 for _, v := range r.Lifecycle.IgnoreChanges {
593 if strings.Contains(v, "*") && v != "*" {
594 errs = append(errs, fmt.Errorf(
595 "%s: ignore_changes does not support using a partial string "+
596 "together with a wildcard: %s", n, v))
597 }
598 }
599
600 // Verify ignore_changes has no interpolations
601 rc, err := NewRawConfig(map[string]interface{}{
602 "root": r.Lifecycle.IgnoreChanges,
603 })
604 if err != nil {
605 errs = append(errs, fmt.Errorf(
606 "%s: lifecycle ignore_changes error: %s",
607 n, err))
608 } else if len(rc.Interpolations) > 0 {
609 errs = append(errs, fmt.Errorf(
610 "%s: lifecycle ignore_changes cannot contain interpolations",
611 n))
612 }
613
614 // If it is a data source then it can't have provisioners
615 if r.Mode == DataResourceMode {
616 if _, ok := r.RawConfig.Raw["provisioner"]; ok {
617 errs = append(errs, fmt.Errorf(
618 "%s: data sources cannot have provisioners",
619 n))
620 }
621 }
622 }
623
624 for source, vs := range vars {
625 for _, v := range vs {
626 rv, ok := v.(*ResourceVariable)
627 if !ok {
628 continue
629 }
630
631 id := rv.ResourceId()
632 if _, ok := resources[id]; !ok {
633 errs = append(errs, fmt.Errorf(
634 "%s: unknown resource '%s' referenced in variable %s",
635 source,
636 id,
637 rv.FullKey()))
638 continue
639 }
640 }
641 }
642
643 // Check that all outputs are valid
644 {
645 found := make(map[string]struct{})
646 for _, o := range c.Outputs {
647 // Verify the output is new
648 if _, ok := found[o.Name]; ok {
649 errs = append(errs, fmt.Errorf(
650 "%s: duplicate output. output names must be unique.",
651 o.Name))
652 continue
653 }
654 found[o.Name] = struct{}{}
655
656 var invalidKeys []string
657 valueKeyFound := false
658 for k := range o.RawConfig.Raw {
659 if k == "value" {
660 valueKeyFound = true
661 continue
662 }
663 if k == "sensitive" {
664 if sensitive, ok := o.RawConfig.config[k].(bool); ok {
665 if sensitive {
666 o.Sensitive = true
667 }
668 continue
669 }
670
671 errs = append(errs, fmt.Errorf(
672 "%s: value for 'sensitive' must be boolean",
673 o.Name))
674 continue
675 }
676 if k == "description" {
677 if desc, ok := o.RawConfig.config[k].(string); ok {
678 o.Description = desc
679 continue
680 }
681
682 errs = append(errs, fmt.Errorf(
683 "%s: value for 'description' must be string",
684 o.Name))
685 continue
686 }
687 invalidKeys = append(invalidKeys, k)
688 }
689 if len(invalidKeys) > 0 {
690 errs = append(errs, fmt.Errorf(
691 "%s: output has invalid keys: %s",
692 o.Name, strings.Join(invalidKeys, ", ")))
693 }
694 if !valueKeyFound {
695 errs = append(errs, fmt.Errorf(
696 "%s: output is missing required 'value' key", o.Name))
697 }
698
699 for _, v := range o.RawConfig.Variables {
700 if _, ok := v.(*CountVariable); ok {
701 errs = append(errs, fmt.Errorf(
702 "%s: count variables are only valid within resources", o.Name))
703 }
704 }
705 }
706 }
707
708 // Check that all variables are in the proper context
709 for source, rc := range c.rawConfigs() {
710 walker := &interpolationWalker{
711 ContextF: c.validateVarContextFn(source, &errs),
712 }
713 if err := reflectwalk.Walk(rc.Raw, walker); err != nil {
714 errs = append(errs, fmt.Errorf(
715 "%s: error reading config: %s", source, err))
716 }
717 }
718
719 // Validate the self variable
720 for source, rc := range c.rawConfigs() {
721 // Ignore provisioners. This is a pretty brittle way to do this,
722 // but better than also repeating all the resources.
723 if strings.Contains(source, "provision") {
724 continue
725 }
726
727 for _, v := range rc.Variables {
728 if _, ok := v.(*SelfVariable); ok {
729 errs = append(errs, fmt.Errorf(
730 "%s: cannot contain self-reference %s", source, v.FullKey()))
731 }
732 }
733 }
734
735 if len(errs) > 0 {
736 return &multierror.Error{Errors: errs}
737 }
738
739 return nil
740}
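
A sketch of how a caller might consume Validate's aggregated result: Validate wraps every problem in one *multierror.Error, so all of them can be reported at once. The empty Config here is a placeholder that validates cleanly; a real one would come from a loader.

package main

import (
	"fmt"

	"github.com/hashicorp/go-multierror"
	"github.com/hashicorp/terraform/config"
)

func main() {
	c := &config.Config{} // placeholder; a real config comes from a loader
	if err := c.Validate(); err != nil {
		// All semantic problems arrive together in one multierror.Error.
		if merr, ok := err.(*multierror.Error); ok {
			for _, e := range merr.Errors {
				fmt.Println("config error:", e)
			}
		}
	}
}
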
741
742// InterpolatedVariables is a helper that returns a mapping of all the interpolated
743// variables within the configuration. This is used to verify references
744// are valid in the Validate step.
745func (c *Config) InterpolatedVariables() map[string][]InterpolatedVariable {
746 result := make(map[string][]InterpolatedVariable)
747 for source, rc := range c.rawConfigs() {
748 for _, v := range rc.Variables {
749 result[source] = append(result[source], v)
750 }
751 }
752 return result
753}
754
755// rawConfigs returns all of the RawConfigs that are available keyed by
756// a human-friendly source.
757func (c *Config) rawConfigs() map[string]*RawConfig {
758 result := make(map[string]*RawConfig)
759 for _, m := range c.Modules {
760 source := fmt.Sprintf("module '%s'", m.Name)
761 result[source] = m.RawConfig
762 }
763
764 for _, pc := range c.ProviderConfigs {
765 source := fmt.Sprintf("provider config '%s'", pc.Name)
766 result[source] = pc.RawConfig
767 }
768
769 for _, rc := range c.Resources {
770 source := fmt.Sprintf("resource '%s'", rc.Id())
771 result[source+" count"] = rc.RawCount
772 result[source+" config"] = rc.RawConfig
773
774 for i, p := range rc.Provisioners {
775 subsource := fmt.Sprintf(
776 "%s provisioner %s (#%d)",
777 source, p.Type, i+1)
778 result[subsource] = p.RawConfig
779 }
780 }
781
782 for _, o := range c.Outputs {
783 source := fmt.Sprintf("output '%s'", o.Name)
784 result[source] = o.RawConfig
785 }
786
787 return result
788}
789
790func (c *Config) validateVarContextFn(
791 source string, errs *[]error) interpolationWalkerContextFunc {
792 return func(loc reflectwalk.Location, node ast.Node) {
793		// If we're in a slice element, then it's fine, since you can do
794		// anything in there.
795 if loc == reflectwalk.SliceElem {
796 return
797 }
798
799		// Otherwise, let's check if there is a splat resource variable
800		// at the top level in here. We do this with a transform that
801		// replaces everything with a noop node unless it's a variable
802		// access or concat. This should turn the AST into a flat tree
803		// of Concat(Noop, ...). If there are any variables left that are
804		// multi-access, then it's still broken.
805 node = node.Accept(func(n ast.Node) ast.Node {
806 // If it is a concat or variable access, we allow it.
807 switch n.(type) {
808 case *ast.Output:
809 return n
810 case *ast.VariableAccess:
811 return n
812 }
813
814 // Otherwise, noop
815 return &noopNode{}
816 })
817
818 vars, err := DetectVariables(node)
819 if err != nil {
820			// Ignore it since this will be caught during parse. This
821			// actually probably should never happen by the time this
822			// is called, but it's okay.
823 return
824 }
825
826 for _, v := range vars {
827 rv, ok := v.(*ResourceVariable)
828 if !ok {
829 return
830 }
831
832 if rv.Multi && rv.Index == -1 {
833 *errs = append(*errs, fmt.Errorf(
834 "%s: use of the splat ('*') operator must be wrapped in a list declaration",
835 source))
836 }
837 }
838 }
839}
840
841func (c *Config) validateDependsOn(
842 n string,
843 v []string,
844 resources map[string]*Resource,
845 modules map[string]*Module) []error {
846 // Verify depends on points to resources that all exist
847 var errs []error
848 for _, d := range v {
849 // Check if we contain interpolations
850 rc, err := NewRawConfig(map[string]interface{}{
851 "value": d,
852 })
853 if err == nil && len(rc.Variables) > 0 {
854 errs = append(errs, fmt.Errorf(
855 "%s: depends on value cannot contain interpolations: %s",
856 n, d))
857 continue
858 }
859
860 // If it is a module, verify it is a module
861 if strings.HasPrefix(d, "module.") {
862 name := d[len("module."):]
863 if _, ok := modules[name]; !ok {
864 errs = append(errs, fmt.Errorf(
865 "%s: resource depends on non-existent module '%s'",
866 n, name))
867 }
868
869 continue
870 }
871
872 // Check resources
873 if _, ok := resources[d]; !ok {
874 errs = append(errs, fmt.Errorf(
875 "%s: resource depends on non-existent resource '%s'",
876 n, d))
877 }
878 }
879
880 return errs
881}
882
883func (m *Module) mergerName() string {
884 return m.Id()
885}
886
887func (m *Module) mergerMerge(other merger) merger {
888 m2 := other.(*Module)
889
890 result := *m
891 result.Name = m2.Name
892 result.RawConfig = result.RawConfig.merge(m2.RawConfig)
893
894 if m2.Source != "" {
895 result.Source = m2.Source
896 }
897
898 return &result
899}
900
901func (o *Output) mergerName() string {
902 return o.Name
903}
904
905func (o *Output) mergerMerge(m merger) merger {
906 o2 := m.(*Output)
907
908 result := *o
909 result.Name = o2.Name
910 result.Description = o2.Description
911 result.RawConfig = result.RawConfig.merge(o2.RawConfig)
912 result.Sensitive = o2.Sensitive
913 result.DependsOn = o2.DependsOn
914
915 return &result
916}
917
918func (c *ProviderConfig) GoString() string {
919 return fmt.Sprintf("*%#v", *c)
920}
921
922func (c *ProviderConfig) FullName() string {
923 if c.Alias == "" {
924 return c.Name
925 }
926
927 return fmt.Sprintf("%s.%s", c.Name, c.Alias)
928}
929
930func (c *ProviderConfig) mergerName() string {
931 return c.Name
932}
933
934func (c *ProviderConfig) mergerMerge(m merger) merger {
935 c2 := m.(*ProviderConfig)
936
937 result := *c
938 result.Name = c2.Name
939 result.RawConfig = result.RawConfig.merge(c2.RawConfig)
940
941 if c2.Alias != "" {
942 result.Alias = c2.Alias
943 }
944
945 return &result
946}
947
948func (r *Resource) mergerName() string {
949 return r.Id()
950}
951
952func (r *Resource) mergerMerge(m merger) merger {
953 r2 := m.(*Resource)
954
955 result := *r
956 result.Mode = r2.Mode
957 result.Name = r2.Name
958 result.Type = r2.Type
959 result.RawConfig = result.RawConfig.merge(r2.RawConfig)
960
961 if r2.RawCount.Value() != "1" {
962 result.RawCount = r2.RawCount
963 }
964
965 if len(r2.Provisioners) > 0 {
966 result.Provisioners = r2.Provisioners
967 }
968
969 return &result
970}
971
972// Merge merges two variables to create a new third variable.
973func (v *Variable) Merge(v2 *Variable) *Variable {
974 // Shallow copy the variable
975 result := *v
976
977 // The names should be the same, but the second name always wins.
978 result.Name = v2.Name
979
980 if v2.DeclaredType != "" {
981 result.DeclaredType = v2.DeclaredType
982 }
983 if v2.Default != nil {
984 result.Default = v2.Default
985 }
986 if v2.Description != "" {
987 result.Description = v2.Description
988 }
989
990 return &result
991}
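
A small sketch of the override semantics: every non-empty field of the second variable wins, while fields it leaves unset survive from the first (the variable values are hypothetical):

package main

import (
	"fmt"

	"github.com/hashicorp/terraform/config"
)

func main() {
	base := &config.Variable{Name: "region", Default: "us-east-1"}
	override := &config.Variable{Name: "region", Description: "AWS region"}

	// The override sets no Default, so the base default survives.
	merged := base.Merge(override)
	fmt.Println(merged.Default, merged.Description) // us-east-1 AWS region
}
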
992
993var typeStringMap = map[string]VariableType{
994 "string": VariableTypeString,
995 "map": VariableTypeMap,
996 "list": VariableTypeList,
997}
998
999// Type returns the type of variable this is.
1000func (v *Variable) Type() VariableType {
1001 if v.DeclaredType != "" {
1002 declaredType, ok := typeStringMap[v.DeclaredType]
1003 if !ok {
1004 return VariableTypeUnknown
1005 }
1006
1007 return declaredType
1008 }
1009
1010 return v.inferTypeFromDefault()
1011}
1012
1013// ValidateTypeAndDefault ensures that the default variable value is
1014// compatible with the declared type (if one exists), and that the type
1015// is one which is known to Terraform.
1016func (v *Variable) ValidateTypeAndDefault() error {
1017 // If an explicit type is declared, ensure it is valid
1018 if v.DeclaredType != "" {
1019 if _, ok := typeStringMap[v.DeclaredType]; !ok {
1020 validTypes := []string{}
1021 for k := range typeStringMap {
1022 validTypes = append(validTypes, k)
1023 }
1024 return fmt.Errorf(
1025 "Variable '%s' type must be one of [%s] - '%s' is not a valid type",
1026 v.Name,
1027 strings.Join(validTypes, ", "),
1028 v.DeclaredType,
1029 )
1030 }
1031 }
1032
1033 if v.DeclaredType == "" || v.Default == nil {
1034 return nil
1035 }
1036
1037 if v.inferTypeFromDefault() != v.Type() {
1038 return fmt.Errorf("'%s' has a default value which is not of type '%s' (got '%s')",
1039 v.Name, v.DeclaredType, v.inferTypeFromDefault().Printable())
1040 }
1041
1042 return nil
1043}
1044
1045func (v *Variable) mergerName() string {
1046 return v.Name
1047}
1048
1049func (v *Variable) mergerMerge(m merger) merger {
1050 return v.Merge(m.(*Variable))
1051}
1052
1053// Required tests whether a variable is required or not.
1054func (v *Variable) Required() bool {
1055 return v.Default == nil
1056}
1057
1058// inferTypeFromDefault contains the logic for the old method of inferring
1059// variable types; we can also use this for validating that the declared
1060// type matches the type of the default value.
1061func (v *Variable) inferTypeFromDefault() VariableType {
1062 if v.Default == nil {
1063 return VariableTypeString
1064 }
1065
1066 var s string
1067 if err := hilmapstructure.WeakDecode(v.Default, &s); err == nil {
1068 v.Default = s
1069 return VariableTypeString
1070 }
1071
1072 var m map[string]interface{}
1073 if err := hilmapstructure.WeakDecode(v.Default, &m); err == nil {
1074 v.Default = m
1075 return VariableTypeMap
1076 }
1077
1078 var l []interface{}
1079 if err := hilmapstructure.WeakDecode(v.Default, &l); err == nil {
1080 v.Default = l
1081 return VariableTypeList
1082 }
1083
1084 return VariableTypeUnknown
1085}
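
Because inference tries a weak string decode first, a scalar default of any primitive type (a number, a boolean) is treated as a string; only genuine maps and slices infer as map or list. A sketch (the variable names and defaults are made up):

package main

import (
	"fmt"

	"github.com/hashicorp/terraform/config"
)

func main() {
	vars := []*config.Variable{
		{Name: "a", Default: 42},                               // weakly decodes to "42": string
		{Name: "b", Default: []interface{}{"x", "y"}},          // list
		{Name: "c", Default: map[string]interface{}{"k": "v"}}, // map
	}
	for _, v := range vars {
		fmt.Println(v.Name, v.Type().Printable())
	}
}
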
1086
1087func (m ResourceMode) Taintable() bool {
1088 switch m {
1089 case ManagedResourceMode:
1090 return true
1091 case DataResourceMode:
1092 return false
1093 default:
1094 panic(fmt.Errorf("unsupported ResourceMode value %s", m))
1095 }
1096}
diff --git a/vendor/github.com/hashicorp/terraform/config/config_string.go b/vendor/github.com/hashicorp/terraform/config/config_string.go
new file mode 100644
index 0000000..0b3abbc
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/config_string.go
@@ -0,0 +1,338 @@
1package config
2
3import (
4 "bytes"
5 "fmt"
6 "sort"
7 "strings"
8)
9
10// TestString is a Stringer-like function that outputs a string that can
11// be used to easily compare multiple Config structures in unit tests.
12//
13// This function has no practical use outside of unit tests and debugging.
14func (c *Config) TestString() string {
15 if c == nil {
16 return "<nil config>"
17 }
18
19 var buf bytes.Buffer
20 if len(c.Modules) > 0 {
21 buf.WriteString("Modules:\n\n")
22 buf.WriteString(modulesStr(c.Modules))
23 buf.WriteString("\n\n")
24 }
25
26 if len(c.Variables) > 0 {
27 buf.WriteString("Variables:\n\n")
28 buf.WriteString(variablesStr(c.Variables))
29 buf.WriteString("\n\n")
30 }
31
32 if len(c.ProviderConfigs) > 0 {
33 buf.WriteString("Provider Configs:\n\n")
34 buf.WriteString(providerConfigsStr(c.ProviderConfigs))
35 buf.WriteString("\n\n")
36 }
37
38 if len(c.Resources) > 0 {
39 buf.WriteString("Resources:\n\n")
40 buf.WriteString(resourcesStr(c.Resources))
41 buf.WriteString("\n\n")
42 }
43
44 if len(c.Outputs) > 0 {
45 buf.WriteString("Outputs:\n\n")
46 buf.WriteString(outputsStr(c.Outputs))
47 buf.WriteString("\n")
48 }
49
50 return strings.TrimSpace(buf.String())
51}
52
53func terraformStr(t *Terraform) string {
54 result := ""
55
56 if b := t.Backend; b != nil {
57 result += fmt.Sprintf("backend (%s)\n", b.Type)
58
59 keys := make([]string, 0, len(b.RawConfig.Raw))
60		for k := range b.RawConfig.Raw {
61 keys = append(keys, k)
62 }
63 sort.Strings(keys)
64
65 for _, k := range keys {
66 result += fmt.Sprintf(" %s\n", k)
67 }
68 }
69
70 return strings.TrimSpace(result)
71}
72
73func modulesStr(ms []*Module) string {
74 result := ""
75 order := make([]int, 0, len(ms))
76 ks := make([]string, 0, len(ms))
77 mapping := make(map[string]int)
78 for i, m := range ms {
79 k := m.Id()
80 ks = append(ks, k)
81 mapping[k] = i
82 }
83 sort.Strings(ks)
84 for _, k := range ks {
85 order = append(order, mapping[k])
86 }
87
88 for _, i := range order {
89 m := ms[i]
90 result += fmt.Sprintf("%s\n", m.Id())
91
92 ks := make([]string, 0, len(m.RawConfig.Raw))
93		for k := range m.RawConfig.Raw {
94 ks = append(ks, k)
95 }
96 sort.Strings(ks)
97
98 result += fmt.Sprintf(" source = %s\n", m.Source)
99
100 for _, k := range ks {
101 result += fmt.Sprintf(" %s\n", k)
102 }
103 }
104
105 return strings.TrimSpace(result)
106}
107
108func outputsStr(os []*Output) string {
109 ns := make([]string, 0, len(os))
110 m := make(map[string]*Output)
111 for _, o := range os {
112 ns = append(ns, o.Name)
113 m[o.Name] = o
114 }
115 sort.Strings(ns)
116
117 result := ""
118 for _, n := range ns {
119 o := m[n]
120
121 result += fmt.Sprintf("%s\n", n)
122
123 if len(o.DependsOn) > 0 {
124			result += "  dependsOn\n"
125 for _, d := range o.DependsOn {
126 result += fmt.Sprintf(" %s\n", d)
127 }
128 }
129
130 if len(o.RawConfig.Variables) > 0 {
131			result += "  vars\n"
132 for _, rawV := range o.RawConfig.Variables {
133 kind := "unknown"
134 str := rawV.FullKey()
135
136 switch rawV.(type) {
137 case *ResourceVariable:
138 kind = "resource"
139 case *UserVariable:
140 kind = "user"
141 }
142
143 result += fmt.Sprintf(" %s: %s\n", kind, str)
144 }
145 }
146 }
147
148 return strings.TrimSpace(result)
149}
150
151// This helper turns the provider configs field into a deterministic
152// string value for comparison in tests.
153func providerConfigsStr(pcs []*ProviderConfig) string {
154 result := ""
155
156 ns := make([]string, 0, len(pcs))
157 m := make(map[string]*ProviderConfig)
158 for _, n := range pcs {
159 ns = append(ns, n.Name)
160 m[n.Name] = n
161 }
162 sort.Strings(ns)
163
164 for _, n := range ns {
165 pc := m[n]
166
167 result += fmt.Sprintf("%s\n", n)
168
169 keys := make([]string, 0, len(pc.RawConfig.Raw))
170		for k := range pc.RawConfig.Raw {
171 keys = append(keys, k)
172 }
173 sort.Strings(keys)
174
175 for _, k := range keys {
176 result += fmt.Sprintf(" %s\n", k)
177 }
178
179 if len(pc.RawConfig.Variables) > 0 {
180			result += "  vars\n"
181 for _, rawV := range pc.RawConfig.Variables {
182 kind := "unknown"
183 str := rawV.FullKey()
184
185 switch rawV.(type) {
186 case *ResourceVariable:
187 kind = "resource"
188 case *UserVariable:
189 kind = "user"
190 }
191
192 result += fmt.Sprintf(" %s: %s\n", kind, str)
193 }
194 }
195 }
196
197 return strings.TrimSpace(result)
198}
199
200// This helper turns the resources field into a deterministic
201// string value for comparison in tests.
202func resourcesStr(rs []*Resource) string {
203 result := ""
204 order := make([]int, 0, len(rs))
205 ks := make([]string, 0, len(rs))
206 mapping := make(map[string]int)
207 for i, r := range rs {
208 k := r.Id()
209 ks = append(ks, k)
210 mapping[k] = i
211 }
212 sort.Strings(ks)
213 for _, k := range ks {
214 order = append(order, mapping[k])
215 }
216
217 for _, i := range order {
218 r := rs[i]
219 result += fmt.Sprintf(
220 "%s (x%s)\n",
221 r.Id(),
222 r.RawCount.Value())
223
224 ks := make([]string, 0, len(r.RawConfig.Raw))
225		for k := range r.RawConfig.Raw {
226 ks = append(ks, k)
227 }
228 sort.Strings(ks)
229
230 for _, k := range ks {
231 result += fmt.Sprintf(" %s\n", k)
232 }
233
234 if len(r.Provisioners) > 0 {
235			result += "  provisioners\n"
236 for _, p := range r.Provisioners {
237 when := ""
238 if p.When != ProvisionerWhenCreate {
239 when = fmt.Sprintf(" (%s)", p.When.String())
240 }
241
242 result += fmt.Sprintf(" %s%s\n", p.Type, when)
243
244 if p.OnFailure != ProvisionerOnFailureFail {
245 result += fmt.Sprintf(" on_failure = %s\n", p.OnFailure.String())
246 }
247
248 ks := make([]string, 0, len(p.RawConfig.Raw))
249				for k := range p.RawConfig.Raw {
250 ks = append(ks, k)
251 }
252 sort.Strings(ks)
253
254 for _, k := range ks {
255 result += fmt.Sprintf(" %s\n", k)
256 }
257 }
258 }
259
260 if len(r.DependsOn) > 0 {
261			result += "  dependsOn\n"
262 for _, d := range r.DependsOn {
263 result += fmt.Sprintf(" %s\n", d)
264 }
265 }
266
267 if len(r.RawConfig.Variables) > 0 {
268			result += "  vars\n"
269
270 ks := make([]string, 0, len(r.RawConfig.Variables))
271			for k := range r.RawConfig.Variables {
272 ks = append(ks, k)
273 }
274 sort.Strings(ks)
275
276 for _, k := range ks {
277 rawV := r.RawConfig.Variables[k]
278 kind := "unknown"
279 str := rawV.FullKey()
280
281 switch rawV.(type) {
282 case *ResourceVariable:
283 kind = "resource"
284 case *UserVariable:
285 kind = "user"
286 }
287
288 result += fmt.Sprintf(" %s: %s\n", kind, str)
289 }
290 }
291 }
292
293 return strings.TrimSpace(result)
294}
295
296// This helper turns the variables field into a deterministic
297// string value for comparison in tests.
298func variablesStr(vs []*Variable) string {
299 result := ""
300 ks := make([]string, 0, len(vs))
301 m := make(map[string]*Variable)
302 for _, v := range vs {
303 ks = append(ks, v.Name)
304 m[v.Name] = v
305 }
306 sort.Strings(ks)
307
308 for _, k := range ks {
309 v := m[k]
310
311 required := ""
312 if v.Required() {
313 required = " (required)"
314 }
315
316 declaredType := ""
317 if v.DeclaredType != "" {
318 declaredType = fmt.Sprintf(" (%s)", v.DeclaredType)
319 }
320
321 if v.Default == nil || v.Default == "" {
322 v.Default = "<>"
323 }
324 if v.Description == "" {
325 v.Description = "<>"
326 }
327
328 result += fmt.Sprintf(
329 "%s%s%s\n %v\n %s\n",
330 k,
331 required,
332 declaredType,
333 v.Default,
334 v.Description)
335 }
336
337 return strings.TrimSpace(result)
338}
diff --git a/vendor/github.com/hashicorp/terraform/config/config_terraform.go b/vendor/github.com/hashicorp/terraform/config/config_terraform.go
new file mode 100644
index 0000000..8535c96
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/config_terraform.go
@@ -0,0 +1,117 @@
1package config
2
3import (
4 "fmt"
5 "strings"
6
7 "github.com/hashicorp/go-version"
8 "github.com/mitchellh/hashstructure"
9)
10
11// Terraform is the Terraform meta-configuration that can be present
12// in configuration files for configuring Terraform itself.
13type Terraform struct {
14 RequiredVersion string `hcl:"required_version"` // Required Terraform version (constraint)
15 Backend *Backend // See Backend struct docs
16}
17
18// Validate performs the validation for just the Terraform configuration.
19func (t *Terraform) Validate() []error {
20 var errs []error
21
22 if raw := t.RequiredVersion; raw != "" {
23 // Check that the value has no interpolations
24 rc, err := NewRawConfig(map[string]interface{}{
25 "root": raw,
26 })
27 if err != nil {
28 errs = append(errs, fmt.Errorf(
29 "terraform.required_version: %s", err))
30 } else if len(rc.Interpolations) > 0 {
31 errs = append(errs, fmt.Errorf(
32 "terraform.required_version: cannot contain interpolations"))
33 } else {
34 // Check it is valid
35 _, err := version.NewConstraint(raw)
36 if err != nil {
37 errs = append(errs, fmt.Errorf(
38 "terraform.required_version: invalid syntax: %s", err))
39 }
40 }
41 }
42
43 if t.Backend != nil {
44 errs = append(errs, t.Backend.Validate()...)
45 }
46
47 return errs
48}
49
50// Merge t with t2.
51// Any conflicting fields are overwritten by t2.
52func (t *Terraform) Merge(t2 *Terraform) {
53 if t2.RequiredVersion != "" {
54 t.RequiredVersion = t2.RequiredVersion
55 }
56
57 if t2.Backend != nil {
58 t.Backend = t2.Backend
59 }
60}
61
62// Backend is the configuration for the "backend" to use with Terraform.
63// A backend is responsible for all major behavior of Terraform's core.
64// The abstraction layer above the core (the "backend") allows for behavior
65// such as remote operation.
66type Backend struct {
67 Type string
68 RawConfig *RawConfig
69
70 // Hash is a unique hash code representing the original configuration
71 // of the backend. This won't be recomputed unless Rehash is called.
72 Hash uint64
73}
74
75// Rehash returns a unique content hash for this backend's configuration
76// as a uint64 value.
77func (b *Backend) Rehash() uint64 {
78 // If we have no backend, the value is zero
79 if b == nil {
80 return 0
81 }
82
83 // Use hashstructure to hash only our type with the config.
84 code, err := hashstructure.Hash(map[string]interface{}{
85 "type": b.Type,
86 "config": b.RawConfig.Raw,
87 }, nil)
88
89	// This should never happen since we have just some basic primitives,
90	// so panic if there is an error.
91 if err != nil {
92 panic(err)
93 }
94
95 return code
96}
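
A sketch of the property Rehash relies on: hashstructure produces the same uint64 for structurally equal inputs, so a changed backend config yields a different code (the backend values below are hypothetical):

package main

import (
	"fmt"

	"github.com/mitchellh/hashstructure"
)

func main() {
	hash := func(bucket string) uint64 {
		code, err := hashstructure.Hash(map[string]interface{}{
			"type":   "s3",
			"config": map[string]interface{}{"bucket": bucket},
		}, nil)
		if err != nil {
			panic(err) // only basic primitives, as in Backend.Rehash
		}
		return code
	}

	fmt.Println(hash("tf-state") == hash("tf-state")) // true: stable
	fmt.Println(hash("tf-state") == hash("other"))    // false: config changed
}
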
97
98func (b *Backend) Validate() []error {
99 if len(b.RawConfig.Interpolations) > 0 {
100 return []error{fmt.Errorf(strings.TrimSpace(errBackendInterpolations))}
101 }
102
103 return nil
104}
105
106const errBackendInterpolations = `
107terraform.backend: configuration cannot contain interpolations
108
109The backend configuration is loaded by Terraform extremely early, before
110the core of Terraform can be initialized. This is necessary because the backend
111dictates the behavior of that core. The core is what handles interpolation
112processing. Because of this, interpolations cannot be used in backend
113configuration.
114
115If you'd like to parameterize backend configuration, we recommend using
116partial configuration with the "-backend-config" flag to "terraform init".
117`
diff --git a/vendor/github.com/hashicorp/terraform/config/config_tree.go b/vendor/github.com/hashicorp/terraform/config/config_tree.go
new file mode 100644
index 0000000..08dc0fe
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/config_tree.go
@@ -0,0 +1,43 @@
1package config
2
3// configTree represents a tree of configurations where the root is the
4// first file and its children are the configurations it has imported.
5type configTree struct {
6 Path string
7 Config *Config
8 Children []*configTree
9}
10
11// Flatten flattens the entire tree down to a single merged Config
12// structure.
13func (t *configTree) Flatten() (*Config, error) {
14 // No children is easy: we're already merged!
15 if len(t.Children) == 0 {
16 return t.Config, nil
17 }
18
19 // Depth-first, merge all the children first.
20 childConfigs := make([]*Config, len(t.Children))
21 for i, ct := range t.Children {
22 c, err := ct.Flatten()
23 if err != nil {
24 return nil, err
25 }
26
27 childConfigs[i] = c
28 }
29
30 // Merge all the children in order
31 config := childConfigs[0]
32 childConfigs = childConfigs[1:]
33 for _, config2 := range childConfigs {
34 var err error
35 config, err = Merge(config, config2)
36 if err != nil {
37 return nil, err
38 }
39 }
40
41 // Merge the final merged child config with our own
42 return Merge(config, t.Config)
43}
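
A sketch of the merge order Flatten implements, reduced to two sibling configs merged with the package-level Merge (the variable names are hypothetical); children merge left to right before the parent merges last, so the parent's values win:

package main

import (
	"fmt"

	"github.com/hashicorp/terraform/config"
)

func main() {
	// Two child configs as Flatten would see them, merged in order.
	c1 := &config.Config{Variables: []*config.Variable{{Name: "a"}}}
	c2 := &config.Config{Variables: []*config.Variable{{Name: "b"}}}

	merged, err := config.Merge(c1, c2)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(merged.Variables)) // 2
}
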
diff --git a/vendor/github.com/hashicorp/terraform/config/import_tree.go b/vendor/github.com/hashicorp/terraform/config/import_tree.go
new file mode 100644
index 0000000..37ec11a
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/import_tree.go
@@ -0,0 +1,113 @@
1package config
2
3import (
4 "fmt"
5 "io"
6)
7
8// configurable is an interface that must be implemented by any configuration
9// formats of Terraform in order to return a *Config.
10type configurable interface {
11 Config() (*Config, error)
12}
13
14// importTree is the result of the first-pass load of the configuration
15// files. It is a tree of raw configurables and then any children (their
16// imports).
17//
18// An importTree can be turned into a configTree.
19type importTree struct {
20 Path string
21 Raw configurable
22 Children []*importTree
23}
24
25// This is the function type that must be implemented by the configuration
26// file loader to turn a single file into a configurable and any additional
27// imports.
28type fileLoaderFunc func(path string) (configurable, []string, error)
29
30// loadTree takes a single file and loads the entire importTree for that
31// file. This function detects what kind of configuration file it is and
32// executes the proper fileLoaderFunc.
33func loadTree(root string) (*importTree, error) {
34 var f fileLoaderFunc
35 switch ext(root) {
36 case ".tf", ".tf.json":
37 f = loadFileHcl
38 default:
39 }
40
41 if f == nil {
42 return nil, fmt.Errorf(
43 "%s: unknown configuration format. Use '.tf' or '.tf.json' extension",
44 root)
45 }
46
47 c, imps, err := f(root)
48 if err != nil {
49 return nil, err
50 }
51
52 children := make([]*importTree, len(imps))
53 for i, imp := range imps {
54 t, err := loadTree(imp)
55 if err != nil {
56 return nil, err
57 }
58
59 children[i] = t
60 }
61
62 return &importTree{
63 Path: root,
64 Raw: c,
65 Children: children,
66 }, nil
67}
68
69// Close releases any resources we might be holding open for the importTree.
70//
71// This can safely be called even while ConfigTree results are alive. The
72// importTree is not bound to these.
73func (t *importTree) Close() error {
74 if c, ok := t.Raw.(io.Closer); ok {
75 c.Close()
76 }
77 for _, ct := range t.Children {
78 ct.Close()
79 }
80
81 return nil
82}
83
84// ConfigTree traverses the importTree and turns each node into a *Config
85// object, ultimately returning a *configTree.
86func (t *importTree) ConfigTree() (*configTree, error) {
87 config, err := t.Raw.Config()
88 if err != nil {
89 return nil, fmt.Errorf(
90 "Error loading %s: %s",
91 t.Path,
92 err)
93 }
94
95 // Build our result
96 result := &configTree{
97 Path: t.Path,
98 Config: config,
99 }
100
101 // Build the config trees for the children
102 result.Children = make([]*configTree, len(t.Children))
103 for i, ct := range t.Children {
104 t, err := ct.ConfigTree()
105 if err != nil {
106 return nil, err
107 }
108
109 result.Children[i] = t
110 }
111
112 return result, nil
113}
diff --git a/vendor/github.com/hashicorp/terraform/config/interpolate.go b/vendor/github.com/hashicorp/terraform/config/interpolate.go
new file mode 100644
index 0000000..bbb3555
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/interpolate.go
@@ -0,0 +1,386 @@
1package config
2
3import (
4 "fmt"
5 "strconv"
6 "strings"
7
8 "github.com/hashicorp/hil/ast"
9)
10
11// An InterpolatedVariable is a variable reference within an interpolation.
12//
13// Implementations of this interface represent various sources where
14// variables can come from: user variables, resources, etc.
15type InterpolatedVariable interface {
16 FullKey() string
17}
18
19// CountVariable is a variable for referencing information about
20// the count.
21type CountVariable struct {
22 Type CountValueType
23 key string
24}
25
26// CountValueType is the type of the count variable that is referenced.
27type CountValueType byte
28
29const (
30 CountValueInvalid CountValueType = iota
31 CountValueIndex
32)
33
34// A ModuleVariable is a variable that is referencing the output
35// of a module, such as "${module.foo.bar}"
36type ModuleVariable struct {
37 Name string
38 Field string
39 key string
40}
41
42// A PathVariable is a variable that references path information about the
43// module.
44type PathVariable struct {
45 Type PathValueType
46 key string
47}
48
49type PathValueType byte
50
51const (
52 PathValueInvalid PathValueType = iota
53 PathValueCwd
54 PathValueModule
55 PathValueRoot
56)
57
58// A ResourceVariable is a variable that is referencing the field
59// of a resource, such as "${aws_instance.foo.ami}"
60type ResourceVariable struct {
61 Mode ResourceMode
62 Type string // Resource type, i.e. "aws_instance"
63 Name string // Resource name
64 Field string // Resource field
65
66 Multi bool // True if multi-variable: aws_instance.foo.*.id
67 Index int // Index for multi-variable: aws_instance.foo.1.id == 1
68
69 key string
70}
71
72// SelfVariable is a variable that is referencing the same resource
73// it is running on: "${self.address}"
74type SelfVariable struct {
75 Field string
76
77 key string
78}
79
80// SimpleVariable is an unprefixed variable, which can show up when users have
81// strings they are passing down to resources that use interpolation
82// internally. The template_file resource is an example of this.
83type SimpleVariable struct {
84 Key string
85}
86
87// TerraformVariable is a "terraform."-prefixed variable used to access
88// metadata about the Terraform run.
89type TerraformVariable struct {
90 Field string
91 key string
92}
93
94// A UserVariable is a variable that references a user variable
95// provided from outside the configuration. This looks like
96// "${var.foo}".
97type UserVariable struct {
98 Name string
99 Elem string
100
101 key string
102}
103
104func NewInterpolatedVariable(v string) (InterpolatedVariable, error) {
105 if strings.HasPrefix(v, "count.") {
106 return NewCountVariable(v)
107 } else if strings.HasPrefix(v, "path.") {
108 return NewPathVariable(v)
109 } else if strings.HasPrefix(v, "self.") {
110 return NewSelfVariable(v)
111 } else if strings.HasPrefix(v, "terraform.") {
112 return NewTerraformVariable(v)
113 } else if strings.HasPrefix(v, "var.") {
114 return NewUserVariable(v)
115 } else if strings.HasPrefix(v, "module.") {
116 return NewModuleVariable(v)
117 } else if !strings.ContainsRune(v, '.') {
118 return NewSimpleVariable(v)
119 } else {
120 return NewResourceVariable(v)
121 }
122}
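
A sketch of the prefix dispatch above: each key shape routes to a different implementation of InterpolatedVariable (the keys below are made-up examples):

package main

import (
	"fmt"

	"github.com/hashicorp/terraform/config"
)

func main() {
	keys := []string{"var.foo", "count.index", "module.net.vpc_id", "aws_instance.web.id"}
	for _, key := range keys {
		v, err := config.NewInterpolatedVariable(key)
		if err != nil {
			panic(err)
		}
		// Prints *config.UserVariable, *config.CountVariable,
		// *config.ModuleVariable, *config.ResourceVariable.
		fmt.Printf("%s -> %T\n", key, v)
	}
}
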
123
124func NewCountVariable(key string) (*CountVariable, error) {
125 var fieldType CountValueType
126 parts := strings.SplitN(key, ".", 2)
127 switch parts[1] {
128 case "index":
129 fieldType = CountValueIndex
130 }
131
132 return &CountVariable{
133 Type: fieldType,
134 key: key,
135 }, nil
136}
137
138func (c *CountVariable) FullKey() string {
139 return c.key
140}
141
142func NewModuleVariable(key string) (*ModuleVariable, error) {
143 parts := strings.SplitN(key, ".", 3)
144 if len(parts) < 3 {
145 return nil, fmt.Errorf(
146 "%s: module variables must be three parts: module.name.attr",
147 key)
148 }
149
150 return &ModuleVariable{
151 Name: parts[1],
152 Field: parts[2],
153 key: key,
154 }, nil
155}
156
157func (v *ModuleVariable) FullKey() string {
158 return v.key
159}
160
161func (v *ModuleVariable) GoString() string {
162 return fmt.Sprintf("*%#v", *v)
163}
164
165func NewPathVariable(key string) (*PathVariable, error) {
166 var fieldType PathValueType
167 parts := strings.SplitN(key, ".", 2)
168 switch parts[1] {
169 case "cwd":
170 fieldType = PathValueCwd
171 case "module":
172 fieldType = PathValueModule
173 case "root":
174 fieldType = PathValueRoot
175 }
176
177 return &PathVariable{
178 Type: fieldType,
179 key: key,
180 }, nil
181}
182
183func (v *PathVariable) FullKey() string {
184 return v.key
185}
186
187func NewResourceVariable(key string) (*ResourceVariable, error) {
188 var mode ResourceMode
189 var parts []string
190 if strings.HasPrefix(key, "data.") {
191 mode = DataResourceMode
192 parts = strings.SplitN(key, ".", 4)
193 if len(parts) < 4 {
194 return nil, fmt.Errorf(
195 "%s: data variables must be four parts: data.TYPE.NAME.ATTR",
196 key)
197 }
198
199 // Don't actually need the "data." prefix for parsing, since it's
200 // always constant.
201 parts = parts[1:]
202 } else {
203 mode = ManagedResourceMode
204 parts = strings.SplitN(key, ".", 3)
205 if len(parts) < 3 {
206 return nil, fmt.Errorf(
207 "%s: resource variables must be three parts: TYPE.NAME.ATTR",
208 key)
209 }
210 }
211
212 field := parts[2]
213 multi := false
214 var index int
215
216 if idx := strings.Index(field, "."); idx != -1 {
217 indexStr := field[:idx]
218 multi = indexStr == "*"
219 index = -1
220
221 if !multi {
222 indexInt, err := strconv.ParseInt(indexStr, 0, 0)
223 if err == nil {
224 multi = true
225 index = int(indexInt)
226 }
227 }
228
229 if multi {
230 field = field[idx+1:]
231 }
232 }
233
234 return &ResourceVariable{
235 Mode: mode,
236 Type: parts[0],
237 Name: parts[1],
238 Field: field,
239 Multi: multi,
240 Index: index,
241 key: key,
242 }, nil
243}
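
A sketch of how the multi/index parsing above classifies the three key shapes (the resource names are hypothetical):

package main

import (
	"fmt"

	"github.com/hashicorp/terraform/config"
)

func main() {
	for _, key := range []string{
		"aws_instance.foo.*.id", // splat: Multi=true, Index=-1
		"aws_instance.foo.1.id", // indexed: Multi=true, Index=1
		"aws_instance.foo.id",   // plain: Multi=false
	} {
		v, err := config.NewResourceVariable(key)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%s Multi=%v Index=%d Field=%s\n", key, v.Multi, v.Index, v.Field)
	}
}
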
244
245func (v *ResourceVariable) ResourceId() string {
246 switch v.Mode {
247 case ManagedResourceMode:
248 return fmt.Sprintf("%s.%s", v.Type, v.Name)
249 case DataResourceMode:
250 return fmt.Sprintf("data.%s.%s", v.Type, v.Name)
251 default:
252 panic(fmt.Errorf("unknown resource mode %s", v.Mode))
253 }
254}
255
256func (v *ResourceVariable) FullKey() string {
257 return v.key
258}
259
260func NewSelfVariable(key string) (*SelfVariable, error) {
261 field := key[len("self."):]
262
263 return &SelfVariable{
264 Field: field,
265
266 key: key,
267 }, nil
268}
269
270func (v *SelfVariable) FullKey() string {
271 return v.key
272}
273
274func (v *SelfVariable) GoString() string {
275 return fmt.Sprintf("*%#v", *v)
276}
277
278func NewSimpleVariable(key string) (*SimpleVariable, error) {
279 return &SimpleVariable{key}, nil
280}
281
282func (v *SimpleVariable) FullKey() string {
283 return v.Key
284}
285
286func (v *SimpleVariable) GoString() string {
287 return fmt.Sprintf("*%#v", *v)
288}
289
290func NewTerraformVariable(key string) (*TerraformVariable, error) {
291 field := key[len("terraform."):]
292 return &TerraformVariable{
293 Field: field,
294 key: key,
295 }, nil
296}
297
298func (v *TerraformVariable) FullKey() string {
299 return v.key
300}
301
302func (v *TerraformVariable) GoString() string {
303 return fmt.Sprintf("*%#v", *v)
304}
305
306func NewUserVariable(key string) (*UserVariable, error) {
307 name := key[len("var."):]
308 elem := ""
309 if idx := strings.Index(name, "."); idx > -1 {
310 elem = name[idx+1:]
311 name = name[:idx]
312 }
313
314 if len(elem) > 0 {
315 return nil, fmt.Errorf("Invalid dot index found: 'var.%s.%s'. Values in maps and lists can be referenced using square bracket indexing, like: 'var.mymap[\"key\"]' or 'var.mylist[1]'.", name, elem)
316 }
317
318 return &UserVariable{
319 key: key,
320
321 Name: name,
322 Elem: elem,
323 }, nil
324}
325
326func (v *UserVariable) FullKey() string {
327 return v.key
328}
329
330func (v *UserVariable) GoString() string {
331 return fmt.Sprintf("*%#v", *v)
332}
333
334// DetectVariables takes an AST root and returns all the interpolated
335// variables that are detected in the AST tree.
336func DetectVariables(root ast.Node) ([]InterpolatedVariable, error) {
337 var result []InterpolatedVariable
338 var resultErr error
339
340 // Visitor callback
341 fn := func(n ast.Node) ast.Node {
342 if resultErr != nil {
343 return n
344 }
345
346 switch vn := n.(type) {
347 case *ast.VariableAccess:
348 v, err := NewInterpolatedVariable(vn.Name)
349 if err != nil {
350 resultErr = err
351 return n
352 }
353 result = append(result, v)
354 case *ast.Index:
355 if va, ok := vn.Target.(*ast.VariableAccess); ok {
356 v, err := NewInterpolatedVariable(va.Name)
357 if err != nil {
358 resultErr = err
359 return n
360 }
361 result = append(result, v)
362 }
363 if va, ok := vn.Key.(*ast.VariableAccess); ok {
364 v, err := NewInterpolatedVariable(va.Name)
365 if err != nil {
366 resultErr = err
367 return n
368 }
369 result = append(result, v)
370 }
371 default:
372 return n
373 }
374
375 return n
376 }
377
378 // Visitor pattern
379 root.Accept(fn)
380
381 if resultErr != nil {
382 return nil, resultErr
383 }
384
385 return result, nil
386}
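
A sketch pairing DetectVariables with HIL's parser, which is how callers typically obtain the AST root (the interpolation string is made up):

package main

import (
	"fmt"

	"github.com/hashicorp/hil"
	"github.com/hashicorp/terraform/config"
)

func main() {
	root, err := hil.Parse(`hello ${var.name} on ${aws_instance.web.id}`)
	if err != nil {
		panic(err)
	}

	vars, err := config.DetectVariables(root)
	if err != nil {
		panic(err)
	}
	for _, v := range vars {
		fmt.Printf("%T %s\n", v, v.FullKey()) // var.name, aws_instance.web.id
	}
}
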
diff --git a/vendor/github.com/hashicorp/terraform/config/interpolate_funcs.go b/vendor/github.com/hashicorp/terraform/config/interpolate_funcs.go
new file mode 100644
index 0000000..f1f97b0
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/interpolate_funcs.go
@@ -0,0 +1,1390 @@
1package config
2
3import (
4 "crypto/md5"
5 "crypto/sha1"
6 "crypto/sha256"
7 "crypto/sha512"
8 "encoding/base64"
9 "encoding/hex"
10 "encoding/json"
11 "fmt"
12 "io/ioutil"
13 "math"
14 "net"
15 "path/filepath"
16 "regexp"
17 "sort"
18 "strconv"
19 "strings"
20 "time"
21
22 "github.com/apparentlymart/go-cidr/cidr"
23 "github.com/hashicorp/go-uuid"
24 "github.com/hashicorp/hil"
25 "github.com/hashicorp/hil/ast"
26 "github.com/mitchellh/go-homedir"
27)
28
29// stringSliceToVariableValue converts a string slice into the value
30// required to be returned from interpolation functions which return
31// TypeList.
32func stringSliceToVariableValue(values []string) []ast.Variable {
33 output := make([]ast.Variable, len(values))
34 for index, value := range values {
35 output[index] = ast.Variable{
36 Type: ast.TypeString,
37 Value: value,
38 }
39 }
40 return output
41}
42
43func listVariableValueToStringSlice(values []ast.Variable) ([]string, error) {
44 output := make([]string, len(values))
45 for index, value := range values {
46 if value.Type != ast.TypeString {
47			return []string{}, fmt.Errorf("list has non-string element (%s)", value.Type.String())
48 }
49 output[index] = value.Value.(string)
50 }
51 return output, nil
52}
53
54// Funcs is the mapping of built-in functions for configuration.
55func Funcs() map[string]ast.Function {
56 return map[string]ast.Function{
57 "basename": interpolationFuncBasename(),
58 "base64decode": interpolationFuncBase64Decode(),
59 "base64encode": interpolationFuncBase64Encode(),
60 "base64sha256": interpolationFuncBase64Sha256(),
61 "base64sha512": interpolationFuncBase64Sha512(),
62 "ceil": interpolationFuncCeil(),
63 "chomp": interpolationFuncChomp(),
64 "cidrhost": interpolationFuncCidrHost(),
65 "cidrnetmask": interpolationFuncCidrNetmask(),
66 "cidrsubnet": interpolationFuncCidrSubnet(),
67 "coalesce": interpolationFuncCoalesce(),
68 "coalescelist": interpolationFuncCoalesceList(),
69 "compact": interpolationFuncCompact(),
70 "concat": interpolationFuncConcat(),
71 "dirname": interpolationFuncDirname(),
72 "distinct": interpolationFuncDistinct(),
73 "element": interpolationFuncElement(),
74 "file": interpolationFuncFile(),
75 "matchkeys": interpolationFuncMatchKeys(),
76 "floor": interpolationFuncFloor(),
77 "format": interpolationFuncFormat(),
78 "formatlist": interpolationFuncFormatList(),
79 "index": interpolationFuncIndex(),
80 "join": interpolationFuncJoin(),
81 "jsonencode": interpolationFuncJSONEncode(),
82 "length": interpolationFuncLength(),
83 "list": interpolationFuncList(),
84 "log": interpolationFuncLog(),
85 "lower": interpolationFuncLower(),
86 "map": interpolationFuncMap(),
87 "max": interpolationFuncMax(),
88 "md5": interpolationFuncMd5(),
89 "merge": interpolationFuncMerge(),
90 "min": interpolationFuncMin(),
91 "pathexpand": interpolationFuncPathExpand(),
92 "uuid": interpolationFuncUUID(),
93 "replace": interpolationFuncReplace(),
94 "sha1": interpolationFuncSha1(),
95 "sha256": interpolationFuncSha256(),
96 "sha512": interpolationFuncSha512(),
97 "signum": interpolationFuncSignum(),
98 "slice": interpolationFuncSlice(),
99 "sort": interpolationFuncSort(),
100 "split": interpolationFuncSplit(),
101 "substr": interpolationFuncSubstr(),
102 "timestamp": interpolationFuncTimestamp(),
103 "title": interpolationFuncTitle(),
104 "trimspace": interpolationFuncTrimSpace(),
105 "upper": interpolationFuncUpper(),
106 "zipmap": interpolationFuncZipMap(),
107 }
108}
109
110// interpolationFuncList creates a list from the parameters passed
111// to it.
112func interpolationFuncList() ast.Function {
113 return ast.Function{
114 ArgTypes: []ast.Type{},
115 ReturnType: ast.TypeList,
116 Variadic: true,
117 VariadicType: ast.TypeAny,
118 Callback: func(args []interface{}) (interface{}, error) {
119 var outputList []ast.Variable
120
121 for i, val := range args {
122 switch v := val.(type) {
123 case string:
124 outputList = append(outputList, ast.Variable{Type: ast.TypeString, Value: v})
125 case []ast.Variable:
126 outputList = append(outputList, ast.Variable{Type: ast.TypeList, Value: v})
127 case map[string]ast.Variable:
128 outputList = append(outputList, ast.Variable{Type: ast.TypeMap, Value: v})
129 default:
130 return nil, fmt.Errorf("unexpected type %T for argument %d in list", v, i)
131 }
132 }
133
134 // we don't support heterogeneous types, so make sure all types match the first
135 if len(outputList) > 0 {
136 firstType := outputList[0].Type
137 for i, v := range outputList[1:] {
138 if v.Type != firstType {
139 return nil, fmt.Errorf("unexpected type %s for argument %d in list", v.Type, i+1)
140 }
141 }
142 }
143
144 return outputList, nil
145 },
146 }
147}
148
149// interpolationFuncMap creates a map from the parameters passed
150// to it.
151func interpolationFuncMap() ast.Function {
152 return ast.Function{
153 ArgTypes: []ast.Type{},
154 ReturnType: ast.TypeMap,
155 Variadic: true,
156 VariadicType: ast.TypeAny,
157 Callback: func(args []interface{}) (interface{}, error) {
158 outputMap := make(map[string]ast.Variable)
159
160 if len(args)%2 != 0 {
161 return nil, fmt.Errorf("requires an even number of arguments, got %d", len(args))
162 }
163
164 var firstType *ast.Type
165 for i := 0; i < len(args); i += 2 {
166 key, ok := args[i].(string)
167 if !ok {
168 return nil, fmt.Errorf("argument %d represents a key, so it must be a string", i+1)
169 }
170 val := args[i+1]
171 variable, err := hil.InterfaceToVariable(val)
172 if err != nil {
173 return nil, err
174 }
175 // Enforce map type homogeneity
176 if firstType == nil {
177 firstType = &variable.Type
178 } else if variable.Type != *firstType {
179 return nil, fmt.Errorf("all map values must have the same type, got %s then %s", firstType.Printable(), variable.Type.Printable())
180 }
181 // Check for duplicate keys
182 if _, ok := outputMap[key]; ok {
183 return nil, fmt.Errorf("argument %d is a duplicate key: %q", i+1, key)
184 }
185 outputMap[key] = variable
186 }
187
188 return outputMap, nil
189 },
190 }
191}
192
193// interpolationFuncCompact strips a list of multi-variable values
194// (e.g. as returned by "split") of any empty strings.
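//
// For example, compact(split(",", "a,,b")) yields the list ["a", "b"].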
195func interpolationFuncCompact() ast.Function {
196 return ast.Function{
197 ArgTypes: []ast.Type{ast.TypeList},
198 ReturnType: ast.TypeList,
199 Variadic: false,
200 Callback: func(args []interface{}) (interface{}, error) {
201 inputList := args[0].([]ast.Variable)
202
203 var outputList []string
204 for _, val := range inputList {
205 strVal, ok := val.Value.(string)
206 if !ok {
207 return nil, fmt.Errorf(
208 "compact() may only be used with flat lists, this list contains elements of %s",
209 val.Type.Printable())
210 }
211 if strVal == "" {
212 continue
213 }
214
215 outputList = append(outputList, strVal)
216 }
217 return stringSliceToVariableValue(outputList), nil
218 },
219 }
220}
221
222// interpolationFuncCidrHost implements the "cidrhost" function that
223// fills in the host part of a CIDR range address to create a single
224// host address.
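//
// For example, cidrhost("10.12.127.0/20", 16) returns "10.12.112.16",
// since the /20 network containing 10.12.127.0 is 10.12.112.0/20.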
225func interpolationFuncCidrHost() ast.Function {
226 return ast.Function{
227 ArgTypes: []ast.Type{
228 ast.TypeString, // starting CIDR mask
229 ast.TypeInt, // host number to insert
230 },
231 ReturnType: ast.TypeString,
232 Variadic: false,
233 Callback: func(args []interface{}) (interface{}, error) {
234 hostNum := args[1].(int)
235 _, network, err := net.ParseCIDR(args[0].(string))
236 if err != nil {
237 return nil, fmt.Errorf("invalid CIDR expression: %s", err)
238 }
239
240 ip, err := cidr.Host(network, hostNum)
241 if err != nil {
242 return nil, err
243 }
244
245 return ip.String(), nil
246 },
247 }
248}
249
250// interpolationFuncCidrNetmask implements the "cidrnetmask" function
251// that returns the subnet mask in IP address notation.
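//
// For example, cidrnetmask("172.16.0.0/12") returns "255.240.0.0".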
252func interpolationFuncCidrNetmask() ast.Function {
253 return ast.Function{
254 ArgTypes: []ast.Type{
255 ast.TypeString, // CIDR mask
256 },
257 ReturnType: ast.TypeString,
258 Variadic: false,
259 Callback: func(args []interface{}) (interface{}, error) {
260 _, network, err := net.ParseCIDR(args[0].(string))
261 if err != nil {
262 return nil, fmt.Errorf("invalid CIDR expression: %s", err)
263 }
264
265 return net.IP(network.Mask).String(), nil
266 },
267 }
268}
269
270// interpolationFuncCidrSubnet implements the "cidrsubnet" function that
271// adds an additional subnet of the given length onto an existing
272// IP block expressed in CIDR notation.
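//
// For example, cidrsubnet("172.16.0.0/12", 4, 2) extends the prefix to /16
// and selects subnet number 2, returning "172.18.0.0/16".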
273func interpolationFuncCidrSubnet() ast.Function {
274 return ast.Function{
275 ArgTypes: []ast.Type{
276 ast.TypeString, // starting CIDR mask
277 ast.TypeInt, // number of bits to extend the prefix
278 ast.TypeInt, // network number to append to the prefix
279 },
280 ReturnType: ast.TypeString,
281 Variadic: false,
282 Callback: func(args []interface{}) (interface{}, error) {
283 extraBits := args[1].(int)
284 subnetNum := args[2].(int)
285 _, network, err := net.ParseCIDR(args[0].(string))
286 if err != nil {
287 return nil, fmt.Errorf("invalid CIDR expression: %s", err)
288 }
289
290 // For portability with 32-bit systems where the subnet number
291 // will be a 32-bit int, we only allow extension of 32 bits in
292 // one call even if we're running on a 64-bit machine.
293 // (Of course, this is significant only for IPv6.)
294 if extraBits > 32 {
295 return nil, fmt.Errorf("may not extend prefix by more than 32 bits")
296 }
297
298 newNetwork, err := cidr.Subnet(network, extraBits, subnetNum)
299 if err != nil {
300 return nil, err
301 }
302
303 return newNetwork.String(), nil
304 },
305 }
306}
307
308// interpolationFuncCoalesce implements the "coalesce" function that
309// returns the first non-empty string from the provided arguments
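//
// For example, coalesce("", "", "b") returns "b"; if every argument is
// empty, the result is the empty string.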
310func interpolationFuncCoalesce() ast.Function {
311 return ast.Function{
312 ArgTypes: []ast.Type{ast.TypeString},
313 ReturnType: ast.TypeString,
314 Variadic: true,
315 VariadicType: ast.TypeString,
316 Callback: func(args []interface{}) (interface{}, error) {
317 if len(args) < 2 {
318 return nil, fmt.Errorf("must provide at least two arguments")
319 }
320 for _, arg := range args {
321 argument := arg.(string)
322
323 if argument != "" {
324 return argument, nil
325 }
326 }
327 return "", nil
328 },
329 }
330}
331
332// interpolationFuncCoalesceList implements the "coalescelist" function that
333// returns the first non empty list from the provided input
334func interpolationFuncCoalesceList() ast.Function {
335 return ast.Function{
336 ArgTypes: []ast.Type{ast.TypeList},
337 ReturnType: ast.TypeList,
338 Variadic: true,
339 VariadicType: ast.TypeList,
340 Callback: func(args []interface{}) (interface{}, error) {
341 if len(args) < 2 {
342 return nil, fmt.Errorf("must provide at least two arguments")
343 }
344 for _, arg := range args {
345 argument := arg.([]ast.Variable)
346
347 if len(argument) > 0 {
348 return argument, nil
349 }
350 }
351 return make([]ast.Variable, 0), nil
352 },
353 }
354}
355
356// interpolationFuncConcat implements the "concat" function that concatenates
357// multiple lists.
358func interpolationFuncConcat() ast.Function {
359 return ast.Function{
360 ArgTypes: []ast.Type{ast.TypeList},
361 ReturnType: ast.TypeList,
362 Variadic: true,
363 VariadicType: ast.TypeList,
364 Callback: func(args []interface{}) (interface{}, error) {
365 var outputList []ast.Variable
366
367 for _, arg := range args {
368 for _, v := range arg.([]ast.Variable) {
369 switch v.Type {
370 case ast.TypeString:
371 outputList = append(outputList, v)
372 case ast.TypeList:
373 outputList = append(outputList, v)
374 case ast.TypeMap:
375 outputList = append(outputList, v)
376 default:
377 return nil, fmt.Errorf("concat() does not support lists of %s", v.Type.Printable())
378 }
379 }
380 }
381
382 // we don't support heterogeneous types, so make sure all types match the first
383 if len(outputList) > 0 {
384 firstType := outputList[0].Type
385 for _, v := range outputList[1:] {
386 if v.Type != firstType {
387 return nil, fmt.Errorf("unexpected %s in list of %s", v.Type.Printable(), firstType.Printable())
388 }
389 }
390 }
391
392 return outputList, nil
393 },
394 }
395}
396
397// interpolationFuncFile implements the "file" function that allows
398// loading contents from a file.
399func interpolationFuncFile() ast.Function {
400 return ast.Function{
401 ArgTypes: []ast.Type{ast.TypeString},
402 ReturnType: ast.TypeString,
403 Callback: func(args []interface{}) (interface{}, error) {
404 path, err := homedir.Expand(args[0].(string))
405 if err != nil {
406 return "", err
407 }
408 data, err := ioutil.ReadFile(path)
409 if err != nil {
410 return "", err
411 }
412
413 return string(data), nil
414 },
415 }
416}
417
418// interpolationFuncFormat implements the "format" function that does
419// string formatting.
420func interpolationFuncFormat() ast.Function {
421 return ast.Function{
422 ArgTypes: []ast.Type{ast.TypeString},
423 Variadic: true,
424 VariadicType: ast.TypeAny,
425 ReturnType: ast.TypeString,
426 Callback: func(args []interface{}) (interface{}, error) {
427 format := args[0].(string)
428 return fmt.Sprintf(format, args[1:]...), nil
429 },
430 }
431}
432
433// interpolationFuncMax returns the maximum of the numeric arguments
434func interpolationFuncMax() ast.Function {
435 return ast.Function{
436 ArgTypes: []ast.Type{ast.TypeFloat},
437 ReturnType: ast.TypeFloat,
438 Variadic: true,
439 VariadicType: ast.TypeFloat,
440 Callback: func(args []interface{}) (interface{}, error) {
441 max := args[0].(float64)
442
443 for i := 1; i < len(args); i++ {
444 max = math.Max(max, args[i].(float64))
445 }
446
447 return max, nil
448 },
449 }
450}
451
452// interpolationFuncMin returns the minimum of the numeric arguments
453func interpolationFuncMin() ast.Function {
454 return ast.Function{
455 ArgTypes: []ast.Type{ast.TypeFloat},
456 ReturnType: ast.TypeFloat,
457 Variadic: true,
458 VariadicType: ast.TypeFloat,
459 Callback: func(args []interface{}) (interface{}, error) {
460 min := args[0].(float64)
461
462 for i := 1; i < len(args); i++ {
463 min = math.Min(min, args[i].(float64))
464 }
465
466 return min, nil
467 },
468 }
469}
470
471// interpolationFuncPathExpand expands a leading "~" in the given path to the current user's home directory
472func interpolationFuncPathExpand() ast.Function {
473 return ast.Function{
474 ArgTypes: []ast.Type{ast.TypeString},
475 ReturnType: ast.TypeString,
476 Callback: func(args []interface{}) (interface{}, error) {
477 return homedir.Expand(args[0].(string))
478 },
479 }
480}
481
482// interpolationFuncCeil returns the least integer value greater than or equal to the argument
483func interpolationFuncCeil() ast.Function {
484 return ast.Function{
485 ArgTypes: []ast.Type{ast.TypeFloat},
486 ReturnType: ast.TypeInt,
487 Callback: func(args []interface{}) (interface{}, error) {
488 return int(math.Ceil(args[0].(float64))), nil
489 },
490 }
491}
492
493// interpolationFuncLog returns the logarithm of the first argument in the base given by the second argument.
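//
// For example, log(256, 2) returns 8, since the callback computes
// ln(256)/ln(2).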
494func interpolationFuncLog() ast.Function {
495 return ast.Function{
496 ArgTypes: []ast.Type{ast.TypeFloat, ast.TypeFloat},
497 ReturnType: ast.TypeFloat,
498 Callback: func(args []interface{}) (interface{}, error) {
499 return math.Log(args[0].(float64)) / math.Log(args[1].(float64)), nil
500 },
501 }
502}
503
504// interpolationFuncChomp removes trailing newlines from the given string
505func interpolationFuncChomp() ast.Function {
506 newlines := regexp.MustCompile(`(?:\r\n?|\n)*\z`)
507 return ast.Function{
508 ArgTypes: []ast.Type{ast.TypeString},
509 ReturnType: ast.TypeString,
510 Callback: func(args []interface{}) (interface{}, error) {
511 return newlines.ReplaceAllString(args[0].(string), ""), nil
512 },
513 }
514}
515
516// interpolationFuncFloor returns the greatest integer value less than or equal to the argument
517func interpolationFuncFloor() ast.Function {
518 return ast.Function{
519 ArgTypes: []ast.Type{ast.TypeFloat},
520 ReturnType: ast.TypeInt,
521 Callback: func(args []interface{}) (interface{}, error) {
522 return int(math.Floor(args[0].(float64))), nil
523 },
524 }
525}
526
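// interpolationFuncZipMap implements the "zipmap" function that constructs a
// map from a list of string keys and a corresponding list of values. For
// example, zipmap(list("a", "b"), list("1", "2")) yields {"a" = "1", "b" = "2"}.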
527func interpolationFuncZipMap() ast.Function {
528 return ast.Function{
529 ArgTypes: []ast.Type{
530 ast.TypeList, // Keys
531 ast.TypeList, // Values
532 },
533 ReturnType: ast.TypeMap,
534 Callback: func(args []interface{}) (interface{}, error) {
535 keys := args[0].([]ast.Variable)
536 values := args[1].([]ast.Variable)
537
538 if len(keys) != len(values) {
539 return nil, fmt.Errorf("count of keys (%d) does not match count of values (%d)",
540 len(keys), len(values))
541 }
542
543 for i, val := range keys {
544 if val.Type != ast.TypeString {
545 return nil, fmt.Errorf("keys must be strings. value at position %d is %s",
546 i, val.Type.Printable())
547 }
548 }
549
550 result := map[string]ast.Variable{}
551 for i := 0; i < len(keys); i++ {
552 result[keys[i].Value.(string)] = values[i]
553 }
554
555 return result, nil
556 },
557 }
558}
559
560// interpolationFuncFormatList implements the "formatlist" function that does
561// string formatting on lists.
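//
// For example, formatlist("<%s>", split(",", "a,b")) yields ["<a>", "<b>"];
// non-list arguments are repeated for each generated string.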
562func interpolationFuncFormatList() ast.Function {
563 return ast.Function{
564 ArgTypes: []ast.Type{ast.TypeAny},
565 Variadic: true,
566 VariadicType: ast.TypeAny,
567 ReturnType: ast.TypeList,
568 Callback: func(args []interface{}) (interface{}, error) {
569 // Make a copy of the variadic part of args
570 // to avoid modifying the original.
571 varargs := make([]interface{}, len(args)-1)
572 copy(varargs, args[1:])
573
574 // Verify we have some arguments
575 if len(varargs) == 0 {
576 return nil, fmt.Errorf("no arguments to formatlist")
577 }
578
579 // Convert arguments that are lists into slices.
580 // Confirm along the way that all lists have the same length (n).
581 var n int
582 listSeen := false
583 for i := 1; i < len(args); i++ {
584 s, ok := args[i].([]ast.Variable)
585 if !ok {
586 continue
587 }
588
589 // Mark that we've seen at least one list
590 listSeen = true
591
592 // Convert the ast.Variable to a slice of strings
593 parts, err := listVariableValueToStringSlice(s)
594 if err != nil {
595 return nil, err
596 }
597
598 // Store the converted list back so it can be indexed during formatting
599 varargs[i-1] = parts
600
601 // Check length
602 if n == 0 {
603 // first list we've seen
604 n = len(parts)
605 continue
606 }
607 if n != len(parts) {
608 return nil, fmt.Errorf("format: mismatched list lengths: %d != %d", n, len(parts))
609 }
610 }
611
612 // If we didn't see a list this is an error because we
613 // can't determine the return value length.
614 if !listSeen {
615 return nil, fmt.Errorf(
616 "formatlist requires at least one list argument")
617 }
618
619 // Do the formatting.
620 format := args[0].(string)
621
622 // Generate a list of formatted strings.
623 list := make([]string, n)
624 fmtargs := make([]interface{}, len(varargs))
625 for i := 0; i < n; i++ {
626 for j, arg := range varargs {
627 switch arg := arg.(type) {
628 default:
629 fmtargs[j] = arg
630 case []string:
631 fmtargs[j] = arg[i]
632 }
633 }
634 list[i] = fmt.Sprintf(format, fmtargs...)
635 }
636 return stringSliceToVariableValue(list), nil
637 },
638 }
639}
640
641// interpolationFuncIndex implements the "index" function that allows one to
642// find the index of a specific element in a list
643func interpolationFuncIndex() ast.Function {
644 return ast.Function{
645 ArgTypes: []ast.Type{ast.TypeList, ast.TypeString},
646 ReturnType: ast.TypeInt,
647 Callback: func(args []interface{}) (interface{}, error) {
648 haystack := args[0].([]ast.Variable)
649 needle := args[1].(string)
650 for index, element := range haystack {
651 if needle == element.Value {
652 return index, nil
653 }
654 }
655 return nil, fmt.Errorf("Could not find '%s' in '%s'", needle, haystack)
656 },
657 }
658}
659
660// interpolationFuncDirname implements the "dirname" function.
661func interpolationFuncDirname() ast.Function {
662 return ast.Function{
663 ArgTypes: []ast.Type{ast.TypeString},
664 ReturnType: ast.TypeString,
665 Callback: func(args []interface{}) (interface{}, error) {
666 return filepath.Dir(args[0].(string)), nil
667 },
668 }
669}
670
671// interpolationFuncDistinct implements the "distinct" function that
672// removes duplicate elements from a list.
673func interpolationFuncDistinct() ast.Function {
674 return ast.Function{
675 ArgTypes: []ast.Type{ast.TypeList},
676 ReturnType: ast.TypeList,
677 Variadic: true,
678 VariadicType: ast.TypeList,
679 Callback: func(args []interface{}) (interface{}, error) {
680 var list []string
681
682 if len(args) != 1 {
683 return nil, fmt.Errorf("accepts only one argument.")
684 }
685
686 if argument, ok := args[0].([]ast.Variable); ok {
687 for _, element := range argument {
688 if element.Type != ast.TypeString {
689 return nil, fmt.Errorf(
690 "only works for flat lists, this list contains elements of %s",
691 element.Type.Printable())
692 }
693 list = appendIfMissing(list, element.Value.(string))
694 }
695 }
696
697 return stringSliceToVariableValue(list), nil
698 },
699 }
700}
701
702// appendIfMissing is a helper that appends an element to a list only if it is not already present
703func appendIfMissing(slice []string, element string) []string {
704 for _, ele := range slice {
705 if ele == element {
706 return slice
707 }
708 }
709 return append(slice, element)
710}
711
712// interpolationFuncMatchKeys implements the "matchkeys" function: for two
713// lists `values` and `keys` of equal length, it returns each element of `values` whose corresponding element in `keys` is in `searchset`.
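//
// For example, matchkeys(list("a", "b", "c"), list("one", "two", "three"),
// list("two")) yields ["b"], since only the key "two" is in the searchset.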
714func interpolationFuncMatchKeys() ast.Function {
715 return ast.Function{
716 ArgTypes: []ast.Type{ast.TypeList, ast.TypeList, ast.TypeList},
717 ReturnType: ast.TypeList,
718 Callback: func(args []interface{}) (interface{}, error) {
719 output := make([]ast.Variable, 0)
720
721 values, _ := args[0].([]ast.Variable)
722 keys, _ := args[1].([]ast.Variable)
723 searchset, _ := args[2].([]ast.Variable)
724
725 if len(keys) != len(values) {
726 return nil, fmt.Errorf("length of keys and values should be equal")
727 }
728
729 for i, key := range keys {
730 for _, search := range searchset {
731 if res, err := compareSimpleVariables(key, search); err != nil {
732 return nil, err
733 } else if res {
734 output = append(output, values[i])
735 break
736 }
737 }
738 }
739 // if searchset is empty, then output is an empty list as well.
740 // if we haven't matched any key, then output is an empty list.
741 return output, nil
742 },
743 }
744}
745
746// compareSimpleVariables compares two variables of the same simple type; complex types such as TypeList or TypeMap are not supported
747func compareSimpleVariables(a, b ast.Variable) (bool, error) {
748 if a.Type != b.Type {
749 return false, fmt.Errorf(
750 "won't compare items of different types %s and %s",
751 a.Type.Printable(), b.Type.Printable())
752 }
753 switch a.Type {
754 case ast.TypeString:
755 return a.Value.(string) == b.Value.(string), nil
756 default:
757 return false, fmt.Errorf(
758 "can't compare items of type %s",
759 a.Type.Printable())
760 }
761}
762
763// interpolationFuncJoin implements the "join" function that allows
764// multi-variable values to be joined by some character.
765func interpolationFuncJoin() ast.Function {
766 return ast.Function{
767 ArgTypes: []ast.Type{ast.TypeString},
768 Variadic: true,
769 VariadicType: ast.TypeList,
770 ReturnType: ast.TypeString,
771 Callback: func(args []interface{}) (interface{}, error) {
772 var list []string
773
774 if len(args) < 2 {
775 return nil, fmt.Errorf("not enough arguments to join()")
776 }
777
778 for _, arg := range args[1:] {
779 for _, part := range arg.([]ast.Variable) {
780 if part.Type != ast.TypeString {
781 return nil, fmt.Errorf(
782 "only works on flat lists, this list contains elements of %s",
783 part.Type.Printable())
784 }
785 list = append(list, part.Value.(string))
786 }
787 }
788
789 return strings.Join(list, args[0].(string)), nil
790 },
791 }
792}
793
794// interpolationFuncJSONEncode implements the "jsonencode" function that encodes
795// a string, list, or map as its JSON representation. For now, values in the
796// list or map may only be strings.
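//
// For example, jsonencode(map("a", "1")) yields the string {"a":"1"}.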
797func interpolationFuncJSONEncode() ast.Function {
798 return ast.Function{
799 ArgTypes: []ast.Type{ast.TypeAny},
800 ReturnType: ast.TypeString,
801 Callback: func(args []interface{}) (interface{}, error) {
802 var toEncode interface{}
803
804 switch typedArg := args[0].(type) {
805 case string:
806 toEncode = typedArg
807
808 case []ast.Variable:
809 // We preallocate the list here. Note that it's important that in
810 // the length 0 case, we have an empty list rather than nil, as
811 // they encode differently.
812 // XXX It would be nice to support arbitrarily nested data here. Is
813 // there an inverse of hil.InterfaceToVariable?
814 strings := make([]string, len(typedArg))
815
816 for i, v := range typedArg {
817 if v.Type != ast.TypeString {
818 return "", fmt.Errorf("list elements must be strings")
819 }
820 strings[i] = v.Value.(string)
821 }
822 toEncode = strings
823
824 case map[string]ast.Variable:
825 // XXX It would be nice to support arbitrarily nested data here. Is
826 // there an inverse of hil.InterfaceToVariable?
827 stringMap := make(map[string]string)
828 for k, v := range typedArg {
829 if v.Type != ast.TypeString {
830 return "", fmt.Errorf("map values must be strings")
831 }
832 stringMap[k] = v.Value.(string)
833 }
834 toEncode = stringMap
835
836 default:
837 return "", fmt.Errorf("unknown type for JSON encoding: %T", args[0])
838 }
839
840 jEnc, err := json.Marshal(toEncode)
841 if err != nil {
842 return "", fmt.Errorf("failed to encode JSON data '%s'", toEncode)
843 }
844 return string(jEnc), nil
845 },
846 }
847}
848
849// interpolationFuncReplace implements the "replace" function that does
850// string replacement.
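//
// For example, replace("hello world", "world", "there") returns
// "hello there", while replace("aaa", "/a+/", "b") treats the slash-wrapped
// pattern as a regular expression and returns "b".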
851func interpolationFuncReplace() ast.Function {
852 return ast.Function{
853 ArgTypes: []ast.Type{ast.TypeString, ast.TypeString, ast.TypeString},
854 ReturnType: ast.TypeString,
855 Callback: func(args []interface{}) (interface{}, error) {
856 s := args[0].(string)
857 search := args[1].(string)
858 replace := args[2].(string)
859
860 // We search/replace using a regexp if the string is surrounded
861 // in forward slashes.
862 if len(search) > 1 && search[0] == '/' && search[len(search)-1] == '/' {
863 re, err := regexp.Compile(search[1 : len(search)-1])
864 if err != nil {
865 return nil, err
866 }
867
868 return re.ReplaceAllString(s, replace), nil
869 }
870
871 return strings.Replace(s, search, replace, -1), nil
872 },
873 }
874}
875
// interpolationFuncLength implements the "length" function that returns the
// length of a string, list, or map.
876func interpolationFuncLength() ast.Function {
877 return ast.Function{
878 ArgTypes: []ast.Type{ast.TypeAny},
879 ReturnType: ast.TypeInt,
880 Variadic: false,
881 Callback: func(args []interface{}) (interface{}, error) {
882 subject := args[0]
883
884 switch typedSubject := subject.(type) {
885 case string:
886 return len(typedSubject), nil
887 case []ast.Variable:
888 return len(typedSubject), nil
889 case map[string]ast.Variable:
890 return len(typedSubject), nil
891 }
892
893 return 0, fmt.Errorf("arguments to length() must be a string, list, or map")
894 },
895 }
896}
897
// interpolationFuncSignum implements the "signum" function that returns -1,
// 0, or 1 according to the sign of the given integer.
898func interpolationFuncSignum() ast.Function {
899 return ast.Function{
900 ArgTypes: []ast.Type{ast.TypeInt},
901 ReturnType: ast.TypeInt,
902 Variadic: false,
903 Callback: func(args []interface{}) (interface{}, error) {
904 num := args[0].(int)
905 switch {
906 case num < 0:
907 return -1, nil
908 case num > 0:
909 return +1, nil
910 default:
911 return 0, nil
912 }
913 },
914 }
915}
916
917// interpolationFuncSlice returns the portion of the input list between from (inclusive) and to (exclusive).
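//
// For example, slice(list("a", "b", "c", "d"), 1, 3) yields ["b", "c"].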
918func interpolationFuncSlice() ast.Function {
919 return ast.Function{
920 ArgTypes: []ast.Type{
921 ast.TypeList, // inputList
922 ast.TypeInt, // from
923 ast.TypeInt, // to
924 },
925 ReturnType: ast.TypeList,
926 Variadic: false,
927 Callback: func(args []interface{}) (interface{}, error) {
928 inputList := args[0].([]ast.Variable)
929 from := args[1].(int)
930 to := args[2].(int)
931
932 if from < 0 {
933 return nil, fmt.Errorf("from index must be >= 0")
934 }
935 if to > len(inputList) {
936 return nil, fmt.Errorf("to index must be <= length of the input list")
937 }
938 if from > to {
939 return nil, fmt.Errorf("from index must be <= to index")
940 }
941
942 var outputList []ast.Variable
943 for i, val := range inputList {
944 if i >= from && i < to {
945 outputList = append(outputList, val)
946 }
947 }
948 return outputList, nil
949 },
950 }
951}
952
953// interpolationFuncSort sorts a list of strings lexicographically
954func interpolationFuncSort() ast.Function {
955 return ast.Function{
956 ArgTypes: []ast.Type{ast.TypeList},
957 ReturnType: ast.TypeList,
958 Variadic: false,
959 Callback: func(args []interface{}) (interface{}, error) {
960 inputList := args[0].([]ast.Variable)
961
962 // Ensure that all the list members are strings and
963 // create a string slice from them
964 members := make([]string, len(inputList))
965 for i, val := range inputList {
966 if val.Type != ast.TypeString {
967 return nil, fmt.Errorf(
968 "sort() may only be used with lists of strings - %s at index %d",
969 val.Type.String(), i)
970 }
971
972 members[i] = val.Value.(string)
973 }
974
975 sort.Strings(members)
976 return stringSliceToVariableValue(members), nil
977 },
978 }
979}
980
981// interpolationFuncSplit implements the "split" function that allows
982// strings to be split into multi-variable values
983func interpolationFuncSplit() ast.Function {
984 return ast.Function{
985 ArgTypes: []ast.Type{ast.TypeString, ast.TypeString},
986 ReturnType: ast.TypeList,
987 Callback: func(args []interface{}) (interface{}, error) {
988 sep := args[0].(string)
989 s := args[1].(string)
990 elements := strings.Split(s, sep)
991 return stringSliceToVariableValue(elements), nil
992 },
993 }
994}
995
996// interpolationFuncLookup implements the "lookup" function that allows
997// dynamic lookups of map types within a Terraform configuration.
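//
// For example, lookup(map("k", "v"), "k") returns "v", and
// lookup(map("k", "v"), "missing", "fallback") returns "fallback".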
998func interpolationFuncLookup(vs map[string]ast.Variable) ast.Function {
999 return ast.Function{
1000 ArgTypes: []ast.Type{ast.TypeMap, ast.TypeString},
1001 ReturnType: ast.TypeString,
1002 Variadic: true,
1003 VariadicType: ast.TypeString,
1004 Callback: func(args []interface{}) (interface{}, error) {
1005 defaultValue := ""
1006 defaultValueSet := false
1007 if len(args) > 2 {
1008 defaultValue = args[2].(string)
1009 defaultValueSet = true
1010 }
1011 if len(args) > 3 {
1012 return "", fmt.Errorf("lookup() takes no more than three arguments")
1013 }
1014 index := args[1].(string)
1015 mapVar := args[0].(map[string]ast.Variable)
1016
1017 v, ok := mapVar[index]
1018 if !ok {
1019 if defaultValueSet {
1020 return defaultValue, nil
1021 } else {
1022 return "", fmt.Errorf(
1023 "lookup failed to find '%s'",
1024 args[1].(string))
1025 }
1026 }
1027 if v.Type != ast.TypeString {
1028 return nil, fmt.Errorf(
1029 "lookup() may only be used with flat maps, this map contains elements of %s",
1030 v.Type.Printable())
1031 }
1032
1033 return v.Value.(string), nil
1034 },
1035 }
1036}
1037
1038// interpolationFuncElement implements the "element" function that allows
1039// a specific index to be looked up in a multi-variable value. Note that this will
1040// wrap if the index is larger than the number of elements in the multi-variable value.
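//
// For example, element(list("a", "b", "c"), "3") wraps around and returns
// "a", since 3 % 3 == 0.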
1041func interpolationFuncElement() ast.Function {
1042 return ast.Function{
1043 ArgTypes: []ast.Type{ast.TypeList, ast.TypeString},
1044 ReturnType: ast.TypeString,
1045 Callback: func(args []interface{}) (interface{}, error) {
1046 list := args[0].([]ast.Variable)
1047 if len(list) == 0 {
1048 return nil, fmt.Errorf("element() may not be used with an empty list")
1049 }
1050
1051 index, err := strconv.Atoi(args[1].(string))
1052 if err != nil || index < 0 {
1053 return "", fmt.Errorf(
1054 "invalid number for index, got %s", args[1])
1055 }
1056
1057 resolvedIndex := index % len(list)
1058
1059 v := list[resolvedIndex]
1060 if v.Type != ast.TypeString {
1061 return nil, fmt.Errorf(
1062 "element() may only be used with flat lists, this list contains elements of %s",
1063 v.Type.Printable())
1064 }
1065 return v.Value, nil
1066 },
1067 }
1068}
1069
1070// interpolationFuncKeys implements the "keys" function that yields a list of
1071// keys of map types within a Terraform configuration.
1072func interpolationFuncKeys(vs map[string]ast.Variable) ast.Function {
1073 return ast.Function{
1074 ArgTypes: []ast.Type{ast.TypeMap},
1075 ReturnType: ast.TypeList,
1076 Callback: func(args []interface{}) (interface{}, error) {
1077 mapVar := args[0].(map[string]ast.Variable)
1078 keys := make([]string, 0)
1079
1080 for k := range mapVar {
1081 keys = append(keys, k)
1082 }
1083
1084 sort.Strings(keys)
1085
1086 // Keys are guaranteed to be strings
1087 return stringSliceToVariableValue(keys), nil
1088 },
1089 }
1090}
1091
1092// interpolationFuncValues implements the "values" function that yields a list
1093// of the values of map types within a Terraform configuration.
1094func interpolationFuncValues(vs map[string]ast.Variable) ast.Function {
1095 return ast.Function{
1096 ArgTypes: []ast.Type{ast.TypeMap},
1097 ReturnType: ast.TypeList,
1098 Callback: func(args []interface{}) (interface{}, error) {
1099 mapVar := args[0].(map[string]ast.Variable)
1100 keys := make([]string, 0)
1101
1102 for k := range mapVar {
1103 keys = append(keys, k)
1104 }
1105
1106 sort.Strings(keys)
1107
1108 values := make([]string, len(keys))
1109 for index, key := range keys {
1110 if value, ok := mapVar[key].Value.(string); ok {
1111 values[index] = value
1112 } else {
1113 return "", fmt.Errorf("values(): %q has element with bad type %s",
1114 key, mapVar[key].Type)
1115 }
1116 }
1117
1118 variable, err := hil.InterfaceToVariable(values)
1119 if err != nil {
1120 return nil, err
1121 }
1122
1123 return variable.Value, nil
1124 },
1125 }
1126}
1127
1128// interpolationFuncBasename implements the "basename" function.
1129func interpolationFuncBasename() ast.Function {
1130 return ast.Function{
1131 ArgTypes: []ast.Type{ast.TypeString},
1132 ReturnType: ast.TypeString,
1133 Callback: func(args []interface{}) (interface{}, error) {
1134 return filepath.Base(args[0].(string)), nil
1135 },
1136 }
1137}
1138
1139// interpolationFuncBase64Encode implements the "base64encode" function that
1140// allows Base64 encoding.
1141func interpolationFuncBase64Encode() ast.Function {
1142 return ast.Function{
1143 ArgTypes: []ast.Type{ast.TypeString},
1144 ReturnType: ast.TypeString,
1145 Callback: func(args []interface{}) (interface{}, error) {
1146 s := args[0].(string)
1147 return base64.StdEncoding.EncodeToString([]byte(s)), nil
1148 },
1149 }
1150}
1151
1152// interpolationFuncBase64Decode implements the "base64decode" function that
1153// allows Base64 decoding.
1154func interpolationFuncBase64Decode() ast.Function {
1155 return ast.Function{
1156 ArgTypes: []ast.Type{ast.TypeString},
1157 ReturnType: ast.TypeString,
1158 Callback: func(args []interface{}) (interface{}, error) {
1159 s := args[0].(string)
1160 sDec, err := base64.StdEncoding.DecodeString(s)
1161 if err != nil {
1162 return "", fmt.Errorf("failed to decode base64 data '%s'", s)
1163 }
1164 return string(sDec), nil
1165 },
1166 }
1167}
1168
1169// interpolationFuncLower implements the "lower" function that does
1170// string lower casing.
1171func interpolationFuncLower() ast.Function {
1172 return ast.Function{
1173 ArgTypes: []ast.Type{ast.TypeString},
1174 ReturnType: ast.TypeString,
1175 Callback: func(args []interface{}) (interface{}, error) {
1176 toLower := args[0].(string)
1177 return strings.ToLower(toLower), nil
1178 },
1179 }
1180}
1181
// interpolationFuncMd5 implements the "md5" function that returns the
// hex-encoded MD5 hash of the given string.
1182func interpolationFuncMd5() ast.Function {
1183 return ast.Function{
1184 ArgTypes: []ast.Type{ast.TypeString},
1185 ReturnType: ast.TypeString,
1186 Callback: func(args []interface{}) (interface{}, error) {
1187 s := args[0].(string)
1188 h := md5.New()
1189 h.Write([]byte(s))
1190 hash := hex.EncodeToString(h.Sum(nil))
1191 return hash, nil
1192 },
1193 }
1194}
1195
// interpolationFuncMerge implements the "merge" function that merges one or
// more maps; on duplicate keys, later arguments take precedence.
1196func interpolationFuncMerge() ast.Function {
1197 return ast.Function{
1198 ArgTypes: []ast.Type{ast.TypeMap},
1199 ReturnType: ast.TypeMap,
1200 Variadic: true,
1201 VariadicType: ast.TypeMap,
1202 Callback: func(args []interface{}) (interface{}, error) {
1203 outputMap := make(map[string]ast.Variable)
1204
1205 for _, arg := range args {
1206 for k, v := range arg.(map[string]ast.Variable) {
1207 outputMap[k] = v
1208 }
1209 }
1210
1211 return outputMap, nil
1212 },
1213 }
1214}
1215
1216// interpolationFuncUpper implements the "upper" function that does
1217// string upper casing.
1218func interpolationFuncUpper() ast.Function {
1219 return ast.Function{
1220 ArgTypes: []ast.Type{ast.TypeString},
1221 ReturnType: ast.TypeString,
1222 Callback: func(args []interface{}) (interface{}, error) {
1223 toUpper := args[0].(string)
1224 return strings.ToUpper(toUpper), nil
1225 },
1226 }
1227}
1228
// interpolationFuncSha1 implements the "sha1" function that returns the
// hex-encoded SHA-1 hash of the given string.
1229func interpolationFuncSha1() ast.Function {
1230 return ast.Function{
1231 ArgTypes: []ast.Type{ast.TypeString},
1232 ReturnType: ast.TypeString,
1233 Callback: func(args []interface{}) (interface{}, error) {
1234 s := args[0].(string)
1235 h := sha1.New()
1236 h.Write([]byte(s))
1237 hash := hex.EncodeToString(h.Sum(nil))
1238 return hash, nil
1239 },
1240 }
1241}
1242
1243// interpolationFuncSha256 returns the hexadecimal representation of the SHA-256 sum
1244func interpolationFuncSha256() ast.Function {
1245 return ast.Function{
1246 ArgTypes: []ast.Type{ast.TypeString},
1247 ReturnType: ast.TypeString,
1248 Callback: func(args []interface{}) (interface{}, error) {
1249 s := args[0].(string)
1250 h := sha256.New()
1251 h.Write([]byte(s))
1252 hash := hex.EncodeToString(h.Sum(nil))
1253 return hash, nil
1254 },
1255 }
1256}
1257
// interpolationFuncSha512 implements the "sha512" function that returns the
// hex-encoded SHA-512 hash of the given string.
1258func interpolationFuncSha512() ast.Function {
1259 return ast.Function{
1260 ArgTypes: []ast.Type{ast.TypeString},
1261 ReturnType: ast.TypeString,
1262 Callback: func(args []interface{}) (interface{}, error) {
1263 s := args[0].(string)
1264 h := sha512.New()
1265 h.Write([]byte(s))
1266 hash := hex.EncodeToString(h.Sum(nil))
1267 return hash, nil
1268 },
1269 }
1270}
1271
// interpolationFuncTrimSpace implements the "trimspace" function that removes
// leading and trailing whitespace from the given string.
1272func interpolationFuncTrimSpace() ast.Function {
1273 return ast.Function{
1274 ArgTypes: []ast.Type{ast.TypeString},
1275 ReturnType: ast.TypeString,
1276 Callback: func(args []interface{}) (interface{}, error) {
1277 trimSpace := args[0].(string)
1278 return strings.TrimSpace(trimSpace), nil
1279 },
1280 }
1281}
1282
// interpolationFuncBase64Sha256 implements the "base64sha256" function that
// returns the base64-encoded SHA-256 sum of the given string.
1283func interpolationFuncBase64Sha256() ast.Function {
1284 return ast.Function{
1285 ArgTypes: []ast.Type{ast.TypeString},
1286 ReturnType: ast.TypeString,
1287 Callback: func(args []interface{}) (interface{}, error) {
1288 s := args[0].(string)
1289 h := sha256.New()
1290 h.Write([]byte(s))
1291 shaSum := h.Sum(nil)
1292 encoded := base64.StdEncoding.EncodeToString(shaSum[:])
1293 return encoded, nil
1294 },
1295 }
1296}
1297
// interpolationFuncBase64Sha512 implements the "base64sha512" function that
// returns the base64-encoded SHA-512 sum of the given string.
1298func interpolationFuncBase64Sha512() ast.Function {
1299 return ast.Function{
1300 ArgTypes: []ast.Type{ast.TypeString},
1301 ReturnType: ast.TypeString,
1302 Callback: func(args []interface{}) (interface{}, error) {
1303 s := args[0].(string)
1304 h := sha512.New()
1305 h.Write([]byte(s))
1306 shaSum := h.Sum(nil)
1307 encoded := base64.StdEncoding.EncodeToString(shaSum[:])
1308 return encoded, nil
1309 },
1310 }
1311}
1312
// interpolationFuncUUID implements the "uuid" function that generates a
// random UUID on each evaluation.
1313func interpolationFuncUUID() ast.Function {
1314 return ast.Function{
1315 ArgTypes: []ast.Type{},
1316 ReturnType: ast.TypeString,
1317 Callback: func(args []interface{}) (interface{}, error) {
1318 return uuid.GenerateUUID()
1319 },
1320 }
1321}
1322
1323// interpolationFuncTimestamp implements the "timestamp" function that returns the current UTC time formatted as an RFC 3339 string.
1324func interpolationFuncTimestamp() ast.Function {
1325 return ast.Function{
1326 ArgTypes: []ast.Type{},
1327 ReturnType: ast.TypeString,
1328 Callback: func(args []interface{}) (interface{}, error) {
1329 return time.Now().UTC().Format(time.RFC3339), nil
1330 },
1331 }
1332}
1333
1334// interpolationFuncTitle implements the "title" function that returns a copy of the
1335// string in which first characters of all the words are capitalized.
1336func interpolationFuncTitle() ast.Function {
1337 return ast.Function{
1338 ArgTypes: []ast.Type{ast.TypeString},
1339 ReturnType: ast.TypeString,
1340 Callback: func(args []interface{}) (interface{}, error) {
1341 toTitle := args[0].(string)
1342 return strings.Title(toTitle), nil
1343 },
1344 }
1345}
1346
1347// interpolationFuncSubstr implements the "substr" function that allows strings
1348// to be truncated.
1349func interpolationFuncSubstr() ast.Function {
1350 return ast.Function{
1351 ArgTypes: []ast.Type{
1352 ast.TypeString, // input string
1353 ast.TypeInt, // offset
1354 ast.TypeInt, // length
1355 },
1356 ReturnType: ast.TypeString,
1357 Callback: func(args []interface{}) (interface{}, error) {
1358 str := args[0].(string)
1359 offset := args[1].(int)
1360 length := args[2].(int)
1361
1362 // Interpret a negative offset as being equivalent to a positive
1363 // offset taken from the end of the string.
1364 if offset < 0 {
1365 offset += len(str)
1366 }
1367
1368 // Interpret a length of `-1` as indicating that the substring
1369 // should start at `offset` and continue until the end of the
1370 // string. Any other negative length (other than `-1`) is invalid.
1371 if length == -1 {
1372 length = len(str)
1373 } else if length >= 0 {
1374 length += offset
1375 } else {
1376 return nil, fmt.Errorf("length should be a non-negative integer")
1377 }
1378
1379 if offset > len(str) {
1380 return nil, fmt.Errorf("offset cannot be larger than the length of the string")
1381 }
1382
1383 if length > len(str) {
1384 return nil, fmt.Errorf("'offset + length' cannot be larger than the length of the string")
1385 }
1386
1387 return str[offset:length], nil
1388 },
1389 }
1390}
diff --git a/vendor/github.com/hashicorp/terraform/config/interpolate_walk.go b/vendor/github.com/hashicorp/terraform/config/interpolate_walk.go
new file mode 100644
index 0000000..ead3d10
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/interpolate_walk.go
@@ -0,0 +1,283 @@
1package config
2
3import (
4 "fmt"
5 "reflect"
6 "strings"
7
8 "github.com/hashicorp/hil"
9 "github.com/hashicorp/hil/ast"
10 "github.com/mitchellh/reflectwalk"
11)
12
13// interpolationWalker implements interfaces for the reflectwalk package
14// (github.com/mitchellh/reflectwalk) that can be used to automatically
15// execute a callback for an interpolation.
16type interpolationWalker struct {
17 // F is the function to call for every interpolation. It can be nil.
18 //
19 // If Replace is true, then the return value of F will be used to
20 // replace the interpolation.
21 F interpolationWalkerFunc
22 Replace bool
23
24 // ContextF is an advanced version of F that also receives the
25 // location of where it is in the structure. This lets you do
26 // context-aware validation.
27 ContextF interpolationWalkerContextFunc
28
29 key []string
30 lastValue reflect.Value
31 loc reflectwalk.Location
32 cs []reflect.Value
33 csKey []reflect.Value
34 csData interface{}
35 sliceIndex []int
36 unknownKeys []string
37}
38
39// interpolationWalkerFunc is the callback called by interpolationWalk.
40// It is called with any interpolation found. It should return a value
41// to replace the interpolation with, along with any errors.
42//
43// If Replace is set to false in interpolationWalker, then the replace
44// value can be anything as it will have no effect.
45type interpolationWalkerFunc func(ast.Node) (interface{}, error)
46
47// interpolationWalkerContextFunc is called by interpolationWalk if
48// ContextF is set. This receives both the interpolation and the location
49// where the interpolation is.
50//
51// This callback can be used to validate the location of the interpolation
52// within the configuration.
53type interpolationWalkerContextFunc func(reflectwalk.Location, ast.Node)
54
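// A minimal usage sketch (assuming the github.com/mitchellh/reflectwalk
// API; rawValue is a placeholder for any map or slice of strings):
//
//	w := &interpolationWalker{
//		F: func(n ast.Node) (interface{}, error) {
//			fmt.Printf("found interpolation: %v\n", n)
//			return "", nil
//		},
//	}
//	err := reflectwalk.Walk(rawValue, w)
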
55func (w *interpolationWalker) Enter(loc reflectwalk.Location) error {
56 w.loc = loc
57 return nil
58}
59
60func (w *interpolationWalker) Exit(loc reflectwalk.Location) error {
61 w.loc = reflectwalk.None
62
63 switch loc {
64 case reflectwalk.Map:
65 w.cs = w.cs[:len(w.cs)-1]
66 case reflectwalk.MapValue:
67 w.key = w.key[:len(w.key)-1]
68 w.csKey = w.csKey[:len(w.csKey)-1]
69 case reflectwalk.Slice:
70 // Split any values that need to be split
71 w.splitSlice()
72 w.cs = w.cs[:len(w.cs)-1]
73 case reflectwalk.SliceElem:
74 w.csKey = w.csKey[:len(w.csKey)-1]
75 w.sliceIndex = w.sliceIndex[:len(w.sliceIndex)-1]
76 }
77
78 return nil
79}
80
81func (w *interpolationWalker) Map(m reflect.Value) error {
82 w.cs = append(w.cs, m)
83 return nil
84}
85
86func (w *interpolationWalker) MapElem(m, k, v reflect.Value) error {
87 w.csData = k
88 w.csKey = append(w.csKey, k)
89
90 if l := len(w.sliceIndex); l > 0 {
91 w.key = append(w.key, fmt.Sprintf("%d.%s", w.sliceIndex[l-1], k.String()))
92 } else {
93 w.key = append(w.key, k.String())
94 }
95
96 w.lastValue = v
97 return nil
98}
99
100func (w *interpolationWalker) Slice(s reflect.Value) error {
101 w.cs = append(w.cs, s)
102 return nil
103}
104
105func (w *interpolationWalker) SliceElem(i int, elem reflect.Value) error {
106 w.csKey = append(w.csKey, reflect.ValueOf(i))
107 w.sliceIndex = append(w.sliceIndex, i)
108 return nil
109}
110
111func (w *interpolationWalker) Primitive(v reflect.Value) error {
112 setV := v
113
114 // We only care about strings
115 if v.Kind() == reflect.Interface {
116 setV = v
117 v = v.Elem()
118 }
119 if v.Kind() != reflect.String {
120 return nil
121 }
122
123 astRoot, err := hil.Parse(v.String())
124 if err != nil {
125 return err
126 }
127
128 // If the AST we got is just a literal string value with the same
129 // value then we ignore it. We have to check if its the same value
130 // because it is possible to input a string, get out a string, and
131 // have it be different. For example: "foo-$${bar}" turns into
132 // "foo-${bar}"
133 if n, ok := astRoot.(*ast.LiteralNode); ok {
134 if s, ok := n.Value.(string); ok && s == v.String() {
135 return nil
136 }
137 }
138
139 if w.ContextF != nil {
140 w.ContextF(w.loc, astRoot)
141 }
142
143 if w.F == nil {
144 return nil
145 }
146
147 replaceVal, err := w.F(astRoot)
148 if err != nil {
149 return fmt.Errorf(
150 "%s in:\n\n%s",
151 err, v.String())
152 }
153
154 if w.Replace {
155 // We need to determine if we need to remove this element
156 // if the result contains any "UnknownVariableValue" which is
157 // set if it is computed. This behavior is different if we're
158 // splitting (in a SliceElem) or not.
159 remove := false
160 if w.loc == reflectwalk.SliceElem {
161 switch typedReplaceVal := replaceVal.(type) {
162 case string:
163 if typedReplaceVal == UnknownVariableValue {
164 remove = true
165 }
166 case []interface{}:
167 if hasUnknownValue(typedReplaceVal) {
168 remove = true
169 }
170 }
171 } else if replaceVal == UnknownVariableValue {
172 remove = true
173 }
174
175 if remove {
176 w.unknownKeys = append(w.unknownKeys, strings.Join(w.key, "."))
177 }
178
179 resultVal := reflect.ValueOf(replaceVal)
180 switch w.loc {
181 case reflectwalk.MapKey:
182 m := w.cs[len(w.cs)-1]
183
184 // Delete the old value
185 var zero reflect.Value
186 m.SetMapIndex(w.csData.(reflect.Value), zero)
187
188 // Set the new key with the existing value
189 m.SetMapIndex(resultVal, w.lastValue)
190
191 // Set the key to be the new key
192 w.csData = resultVal
193 case reflectwalk.MapValue:
194 // If we're in a map, then the only way to set a map value is
195 // to set it directly.
196 m := w.cs[len(w.cs)-1]
197 mk := w.csData.(reflect.Value)
198 m.SetMapIndex(mk, resultVal)
199 default:
200 // Otherwise, we should be addressable
201 setV.Set(resultVal)
202 }
203 }
204
205 return nil
206}
207
208func (w *interpolationWalker) replaceCurrent(v reflect.Value) {
209 // If we don't have at least 2 values on the stack, there is no
210 // containing map to replace into, and indexing below would panic.
211 if len(w.cs) < 2 {
212 return
213 }
214
215 c := w.cs[len(w.cs)-2]
216 switch c.Kind() {
217 case reflect.Map:
218 // Get the key and delete it
219 k := w.csKey[len(w.csKey)-1]
220 c.SetMapIndex(k, v)
221 }
222}
223
224func hasUnknownValue(variable []interface{}) bool {
225 for _, value := range variable {
226 if strVal, ok := value.(string); ok {
227 if strVal == UnknownVariableValue {
228 return true
229 }
230 }
231 }
232 return false
233}
234
235func (w *interpolationWalker) splitSlice() {
236 raw := w.cs[len(w.cs)-1]
237
238 var s []interface{}
239 switch v := raw.Interface().(type) {
240 case []interface{}:
241 s = v
242 case []map[string]interface{}:
243 return
244 }
245
246 split := false
247 for _, val := range s {
248 if varVal, ok := val.(ast.Variable); ok && varVal.Type == ast.TypeList {
249 split = true
250 }
251 if _, ok := val.([]interface{}); ok {
252 split = true
253 }
254 }
255
256 if !split {
257 return
258 }
259
260 result := make([]interface{}, 0)
261 for _, v := range s {
262 switch val := v.(type) {
263 case ast.Variable:
264 switch val.Type {
265 case ast.TypeList:
266 elements := val.Value.([]ast.Variable)
267 for _, element := range elements {
268 result = append(result, element.Value)
269 }
270 default:
271 result = append(result, val.Value)
272 }
273 case []interface{}:
274 for _, element := range val {
275 result = append(result, element)
276 }
277 default:
278 result = append(result, v)
279 }
280 }
281
282 w.replaceCurrent(reflect.ValueOf(result))
283}
diff --git a/vendor/github.com/hashicorp/terraform/config/lang.go b/vendor/github.com/hashicorp/terraform/config/lang.go
new file mode 100644
index 0000000..890d30b
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/lang.go
@@ -0,0 +1,11 @@
1package config
2
3import (
4 "github.com/hashicorp/hil/ast"
5)
6
7type noopNode struct{}
8
9func (n *noopNode) Accept(ast.Visitor) ast.Node { return n }
10func (n *noopNode) Pos() ast.Pos { return ast.Pos{} }
11func (n *noopNode) Type(ast.Scope) (ast.Type, error) { return ast.TypeString, nil }
diff --git a/vendor/github.com/hashicorp/terraform/config/loader.go b/vendor/github.com/hashicorp/terraform/config/loader.go
new file mode 100644
index 0000000..0bfa89c
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/loader.go
@@ -0,0 +1,224 @@
1package config
2
3import (
4 "encoding/json"
5 "fmt"
6 "io"
7 "os"
8 "path/filepath"
9 "sort"
10 "strings"
11
12 "github.com/hashicorp/hcl"
13)
14
15// ErrNoConfigsFound is the error returned by LoadDir if no
16// Terraform configuration files were found in the given directory.
17type ErrNoConfigsFound struct {
18 Dir string
19}
20
21func (e ErrNoConfigsFound) Error() string {
22 return fmt.Sprintf(
23 "No Terraform configuration files found in directory: %s",
24 e.Dir)
25}
26
27// LoadJSON loads a single Terraform configuration from a given JSON document.
28//
29// The document must be a complete Terraform configuration. This function will
30// NOT try to load any additional modules so only the given document is loaded.
31func LoadJSON(raw json.RawMessage) (*Config, error) {
32 obj, err := hcl.Parse(string(raw))
33 if err != nil {
34 return nil, fmt.Errorf(
35 "Error parsing JSON document as HCL: %s", err)
36 }
37
38 // Start building the result
39 hclConfig := &hclConfigurable{
40 Root: obj,
41 }
42
43 return hclConfig.Config()
44}
45
46// LoadFile loads the Terraform configuration from a given file.
47//
48// This file can be any format that Terraform recognizes, and may import
49// other files in any format that Terraform recognizes.
50func LoadFile(path string) (*Config, error) {
51 importTree, err := loadTree(path)
52 if err != nil {
53 return nil, err
54 }
55
56 configTree, err := importTree.ConfigTree()
57
58 // Close the importTree now so that we can clear resources as quickly
59 // as possible.
60 importTree.Close()
61
62 if err != nil {
63 return nil, err
64 }
65
66 return configTree.Flatten()
67}
68
69// LoadDir loads all the Terraform configuration files in a single
70// directory and appends them together.
71//
72// Special files known as "override files" can also be present, which
73// are merged into the loaded configuration. That is, the non-override
74// files are loaded first to create the configuration. Then, the overrides
75// are merged into the configuration to create the final configuration.
76//
77// Files are loaded in lexical order.
78func LoadDir(root string) (*Config, error) {
79 files, overrides, err := dirFiles(root)
80 if err != nil {
81 return nil, err
82 }
83 if len(files) == 0 {
84 return nil, &ErrNoConfigsFound{Dir: root}
85 }
86
87 // Determine the absolute path to the directory.
88 rootAbs, err := filepath.Abs(root)
89 if err != nil {
90 return nil, err
91 }
92
93 var result *Config
94
95 // Sort the files and overrides so we have a deterministic order
96 sort.Strings(files)
97 sort.Strings(overrides)
98
99 // Load all the regular files, append them to each other.
100 for _, f := range files {
101 c, err := LoadFile(f)
102 if err != nil {
103 return nil, err
104 }
105
106 if result != nil {
107 result, err = Append(result, c)
108 if err != nil {
109 return nil, err
110 }
111 } else {
112 result = c
113 }
114 }
115
116 // Load all the overrides, and merge them into the config
117 for _, f := range overrides {
118 c, err := LoadFile(f)
119 if err != nil {
120 return nil, err
121 }
122
123 result, err = Merge(result, c)
124 if err != nil {
125 return nil, err
126 }
127 }
128
129 // Mark the directory
130 result.Dir = rootAbs
131
132 return result, nil
133}
134
135// IsEmptyDir returns true if the directory given has no Terraform
136// configuration files.
137func IsEmptyDir(root string) (bool, error) {
138 if _, err := os.Stat(root); err != nil && os.IsNotExist(err) {
139 return true, nil
140 }
141
142 fs, overrides, err := dirFiles(root)
143 if err != nil {
144 return false, err
145 }
146
147 return len(fs) == 0 && len(overrides) == 0, nil
148}
149
150// ext returns the Terraform configuration extension of the given
151// path, or a blank string if the path is not a valid configuration file.
152func ext(path string) string {
153 if strings.HasSuffix(path, ".tf") {
154 return ".tf"
155 } else if strings.HasSuffix(path, ".tf.json") {
156 return ".tf.json"
157 } else {
158 return ""
159 }
160}
161
162func dirFiles(dir string) ([]string, []string, error) {
163 f, err := os.Open(dir)
164 if err != nil {
165 return nil, nil, err
166 }
167 defer f.Close()
168
169 fi, err := f.Stat()
170 if err != nil {
171 return nil, nil, err
172 }
173 if !fi.IsDir() {
174 return nil, nil, fmt.Errorf(
175 "configuration path must be a directory: %s",
176 dir)
177 }
178
179 var files, overrides []string
180 err = nil
181 for err != io.EOF {
182 var fis []os.FileInfo
183 fis, err = f.Readdir(128)
184 if err != nil && err != io.EOF {
185 return nil, nil, err
186 }
187
188 for _, fi := range fis {
189 // Ignore directories
190 if fi.IsDir() {
191 continue
192 }
193
194 // Only care about files that are valid to load
195 name := fi.Name()
196 extValue := ext(name)
197 if extValue == "" || isIgnoredFile(name) {
198 continue
199 }
200
201 // Determine if we're dealing with an override
202 nameNoExt := name[:len(name)-len(extValue)]
203 override := nameNoExt == "override" ||
204 strings.HasSuffix(nameNoExt, "_override")
205
206 path := filepath.Join(dir, name)
207 if override {
208 overrides = append(overrides, path)
209 } else {
210 files = append(files, path)
211 }
212 }
213 }
214
215 return files, overrides, nil
216}
217
218// isIgnoredFile returns true or false depending on whether the
219// provided file name is a file that should be ignored.
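//
// For example: ".main.tf" (hidden file), "main.tf~" (vim backup), and
// "#main.tf#" (emacs autosave) are all ignored, while "main.tf" is loaded.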
220func isIgnoredFile(name string) bool {
221 return strings.HasPrefix(name, ".") || // Unix-like hidden files
222 strings.HasSuffix(name, "~") || // vim
223 strings.HasPrefix(name, "#") && strings.HasSuffix(name, "#") // emacs
224}
diff --git a/vendor/github.com/hashicorp/terraform/config/loader_hcl.go b/vendor/github.com/hashicorp/terraform/config/loader_hcl.go
new file mode 100644
index 0000000..9abb196
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/loader_hcl.go
@@ -0,0 +1,1130 @@
1package config
2
3import (
4 "fmt"
5 "io/ioutil"
6
7 "github.com/hashicorp/go-multierror"
8 "github.com/hashicorp/hcl"
9 "github.com/hashicorp/hcl/hcl/ast"
10 "github.com/mitchellh/mapstructure"
11)
12
13// hclConfigurable is an implementation of configurable that knows
14// how to turn HCL configuration into a *Config object.
15type hclConfigurable struct {
16 File string
17 Root *ast.File
18}
19
20func (t *hclConfigurable) Config() (*Config, error) {
21 validKeys := map[string]struct{}{
22 "atlas": struct{}{},
23 "data": struct{}{},
24 "module": struct{}{},
25 "output": struct{}{},
26 "provider": struct{}{},
27 "resource": struct{}{},
28 "terraform": struct{}{},
29 "variable": struct{}{},
30 }
31
32 // Top-level item should be the object list
33 list, ok := t.Root.Node.(*ast.ObjectList)
34 if !ok {
35 return nil, fmt.Errorf("error parsing: file doesn't contain a root object")
36 }
37
38 // Start building up the actual configuration.
39 config := new(Config)
40
41 // Terraform config
42 if o := list.Filter("terraform"); len(o.Items) > 0 {
43 var err error
44 config.Terraform, err = loadTerraformHcl(o)
45 if err != nil {
46 return nil, err
47 }
48 }
49
50 // Build the variables
51 if vars := list.Filter("variable"); len(vars.Items) > 0 {
52 var err error
53 config.Variables, err = loadVariablesHcl(vars)
54 if err != nil {
55 return nil, err
56 }
57 }
58
59 // Get Atlas configuration
60 if atlas := list.Filter("atlas"); len(atlas.Items) > 0 {
61 var err error
62 config.Atlas, err = loadAtlasHcl(atlas)
63 if err != nil {
64 return nil, err
65 }
66 }
67
68 // Build the modules
69 if modules := list.Filter("module"); len(modules.Items) > 0 {
70 var err error
71 config.Modules, err = loadModulesHcl(modules)
72 if err != nil {
73 return nil, err
74 }
75 }
76
77 // Build the provider configs
78 if providers := list.Filter("provider"); len(providers.Items) > 0 {
79 var err error
80 config.ProviderConfigs, err = loadProvidersHcl(providers)
81 if err != nil {
82 return nil, err
83 }
84 }
85
86 // Build the resources
87 {
88 var err error
89 managedResourceConfigs := list.Filter("resource")
90 dataResourceConfigs := list.Filter("data")
91
92 config.Resources = make(
93 []*Resource, 0,
94 len(managedResourceConfigs.Items)+len(dataResourceConfigs.Items),
95 )
96
97 managedResources, err := loadManagedResourcesHcl(managedResourceConfigs)
98 if err != nil {
99 return nil, err
100 }
101 dataResources, err := loadDataResourcesHcl(dataResourceConfigs)
102 if err != nil {
103 return nil, err
104 }
105
106 config.Resources = append(config.Resources, dataResources...)
107 config.Resources = append(config.Resources, managedResources...)
108 }
109
110 // Build the outputs
111 if outputs := list.Filter("output"); len(outputs.Items) > 0 {
112 var err error
113 config.Outputs, err = loadOutputsHcl(outputs)
114 if err != nil {
115 return nil, err
116 }
117 }
118
119 // Check for invalid keys
120 for _, item := range list.Items {
121 if len(item.Keys) == 0 {
122 // Not sure how this would happen, but let's avoid a panic
123 continue
124 }
125
126 k := item.Keys[0].Token.Value().(string)
127 if _, ok := validKeys[k]; ok {
128 continue
129 }
130
131 config.unknownKeys = append(config.unknownKeys, k)
132 }
133
134 return config, nil
135}
136
137// loadFileHcl is a fileLoaderFunc that knows how to read HCL
138// files and turn them into hclConfigurables.
139func loadFileHcl(root string) (configurable, []string, error) {
140 // Read the HCL file and prepare for parsing
141 d, err := ioutil.ReadFile(root)
142 if err != nil {
143 return nil, nil, fmt.Errorf(
144 "Error reading %s: %s", root, err)
145 }
146
147 // Parse it
148 hclRoot, err := hcl.Parse(string(d))
149 if err != nil {
150 return nil, nil, fmt.Errorf(
151 "Error parsing %s: %s", root, err)
152 }
153
154 // Start building the result
155 result := &hclConfigurable{
156 File: root,
157 Root: hclRoot,
158 }
159
160 // Dive in, find the imports. This is disabled for now since
161 // imports were removed prior to Terraform 0.1. The code remains
162 // here, commented out, for historical purposes.
163 /*
164 imports := obj.Get("import")
165 if imports == nil {
166 result.Object.Ref()
167 return result, nil, nil
168 }
169
170 if imports.Type() != libucl.ObjectTypeString {
171 imports.Close()
172
173 return nil, nil, fmt.Errorf(
174 "Error in %s: all 'import' declarations should be in the format\n"+
175 "`import \"foo\"` (Got type %s)",
176 root,
177 imports.Type())
178 }
179
180 // Gather all the import paths
181 importPaths := make([]string, 0, imports.Len())
182 iter := imports.Iterate(false)
183 for imp := iter.Next(); imp != nil; imp = iter.Next() {
184 path := imp.ToString()
185 if !filepath.IsAbs(path) {
186 // Relative paths are relative to the Terraform file itself
187 dir := filepath.Dir(root)
188 path = filepath.Join(dir, path)
189 }
190
191 importPaths = append(importPaths, path)
192 imp.Close()
193 }
194 iter.Close()
195 imports.Close()
196
197 result.Object.Ref()
198 */
199
200 return result, nil, nil
201}
202
203	// Given a handle to an HCL object, this transforms it into the Terraform config
204func loadTerraformHcl(list *ast.ObjectList) (*Terraform, error) {
205 if len(list.Items) > 1 {
206 return nil, fmt.Errorf("only one 'terraform' block allowed per module")
207 }
208
209 // Get our one item
210 item := list.Items[0]
211
212 // This block should have an empty top level ObjectItem. If there are keys
213 // here, it's likely because we have a flattened JSON object, and we can
214 // lift this into a nested ObjectList to decode properly.
215 if len(item.Keys) > 0 {
216 item = &ast.ObjectItem{
217 Val: &ast.ObjectType{
218 List: &ast.ObjectList{
219 Items: []*ast.ObjectItem{item},
220 },
221 },
222 }
223 }
224
225 // We need the item value as an ObjectList
226 var listVal *ast.ObjectList
227 if ot, ok := item.Val.(*ast.ObjectType); ok {
228 listVal = ot.List
229 } else {
230 return nil, fmt.Errorf("terraform block: should be an object")
231 }
232
233 // NOTE: We purposely don't validate unknown HCL keys here so that
234 // we can potentially read _future_ Terraform version config (to
235 // still be able to validate the required version).
236 //
237 // We should still keep track of unknown keys to validate later, but
238 // HCL doesn't currently support that.
239
240 var config Terraform
241 if err := hcl.DecodeObject(&config, item.Val); err != nil {
242 return nil, fmt.Errorf(
243 "Error reading terraform config: %s",
244 err)
245 }
246
247	// If we have a backend block, then parse it out
248 if os := listVal.Filter("backend"); len(os.Items) > 0 {
249 var err error
250 config.Backend, err = loadTerraformBackendHcl(os)
251 if err != nil {
252 return nil, fmt.Errorf(
253 "Error reading backend config for terraform block: %s",
254 err)
255 }
256 }
257
258 return &config, nil
259}
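
// Illustrative example (not part of the original source) of a
// terraform block this function decodes; the version constraint and
// backend settings are hypothetical:
//
//     terraform {
//       required_version = ">= 0.9.0"
//
//       backend "consul" {
//         path = "tf/state"
//       }
//     }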
260
261// Loads the Backend configuration from an object list.
262func loadTerraformBackendHcl(list *ast.ObjectList) (*Backend, error) {
263 if len(list.Items) > 1 {
264 return nil, fmt.Errorf("only one 'backend' block allowed")
265 }
266
267 // Get our one item
268 item := list.Items[0]
269
270 // Verify the keys
271 if len(item.Keys) != 1 {
272 return nil, fmt.Errorf(
273 "position %s: 'backend' must be followed by exactly one string: a type",
274 item.Pos())
275 }
276
277 typ := item.Keys[0].Token.Value().(string)
278
279 // Decode the raw config
280 var config map[string]interface{}
281 if err := hcl.DecodeObject(&config, item.Val); err != nil {
282 return nil, fmt.Errorf(
283 "Error reading backend config: %s",
284 err)
285 }
286
287 rawConfig, err := NewRawConfig(config)
288 if err != nil {
289 return nil, fmt.Errorf(
290 "Error reading backend config: %s",
291 err)
292 }
293
294 b := &Backend{
295 Type: typ,
296 RawConfig: rawConfig,
297 }
298 b.Hash = b.Rehash()
299
300 return b, nil
301}
302
303	// Given a handle to an HCL object, this transforms it into the Atlas
304// configuration.
305func loadAtlasHcl(list *ast.ObjectList) (*AtlasConfig, error) {
306 if len(list.Items) > 1 {
307 return nil, fmt.Errorf("only one 'atlas' block allowed")
308 }
309
310 // Get our one item
311 item := list.Items[0]
312
313 var config AtlasConfig
314 if err := hcl.DecodeObject(&config, item.Val); err != nil {
315 return nil, fmt.Errorf(
316 "Error reading atlas config: %s",
317 err)
318 }
319
320 return &config, nil
321}
322
323	// Given a handle to an HCL object, this recurses into the structure
324// and pulls out a list of modules.
325//
326// The resulting modules may not be unique, but each module
327// represents exactly one module definition in the HCL configuration.
328// We leave it up to another pass to merge them together.
329func loadModulesHcl(list *ast.ObjectList) ([]*Module, error) {
330 if err := assertAllBlocksHaveNames("module", list); err != nil {
331 return nil, err
332 }
333
334 list = list.Children()
335 if len(list.Items) == 0 {
336 return nil, nil
337 }
338
339 // Where all the results will go
340 var result []*Module
341
342 // Now go over all the types and their children in order to get
343 // all of the actual resources.
344 for _, item := range list.Items {
345 k := item.Keys[0].Token.Value().(string)
346
347 var listVal *ast.ObjectList
348 if ot, ok := item.Val.(*ast.ObjectType); ok {
349 listVal = ot.List
350 } else {
351 return nil, fmt.Errorf("module '%s': should be an object", k)
352 }
353
354 var config map[string]interface{}
355 if err := hcl.DecodeObject(&config, item.Val); err != nil {
356 return nil, fmt.Errorf(
357 "Error reading config for %s: %s",
358 k,
359 err)
360 }
361
362 // Remove the fields we handle specially
363 delete(config, "source")
364
365 rawConfig, err := NewRawConfig(config)
366 if err != nil {
367 return nil, fmt.Errorf(
368 "Error reading config for %s: %s",
369 k,
370 err)
371 }
372
373	// If we have a source, then parse it out
374 var source string
375 if o := listVal.Filter("source"); len(o.Items) > 0 {
376 err = hcl.DecodeObject(&source, o.Items[0].Val)
377 if err != nil {
378 return nil, fmt.Errorf(
379 "Error parsing source for %s: %s",
380 k,
381 err)
382 }
383 }
384
385 result = append(result, &Module{
386 Name: k,
387 Source: source,
388 RawConfig: rawConfig,
389 })
390 }
391
392 return result, nil
393}
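
// Illustrative example (not from the original source) of a module
// block this loader handles; the source and parameter are hypothetical:
//
//     module "network" {
//       source     = "./modules/network"
//       cidr_block = "10.0.0.0/16"
//     }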
394
395	// loadOutputsHcl recurses into the given HCL object and turns
396	// it into a list of outputs.
397func loadOutputsHcl(list *ast.ObjectList) ([]*Output, error) {
398 if err := assertAllBlocksHaveNames("output", list); err != nil {
399 return nil, err
400 }
401
402 list = list.Children()
403
404 // Go through each object and turn it into an actual result.
405 result := make([]*Output, 0, len(list.Items))
406 for _, item := range list.Items {
407 n := item.Keys[0].Token.Value().(string)
408
409 var listVal *ast.ObjectList
410 if ot, ok := item.Val.(*ast.ObjectType); ok {
411 listVal = ot.List
412 } else {
413 return nil, fmt.Errorf("output '%s': should be an object", n)
414 }
415
416 var config map[string]interface{}
417 if err := hcl.DecodeObject(&config, item.Val); err != nil {
418 return nil, err
419 }
420
421 // Delete special keys
422 delete(config, "depends_on")
423
424 rawConfig, err := NewRawConfig(config)
425 if err != nil {
426 return nil, fmt.Errorf(
427 "Error reading config for output %s: %s",
428 n,
429 err)
430 }
431
432 // If we have depends fields, then add those in
433 var dependsOn []string
434 if o := listVal.Filter("depends_on"); len(o.Items) > 0 {
435 err := hcl.DecodeObject(&dependsOn, o.Items[0].Val)
436 if err != nil {
437 return nil, fmt.Errorf(
438 "Error reading depends_on for output %q: %s",
439 n,
440 err)
441 }
442 }
443
444 result = append(result, &Output{
445 Name: n,
446 RawConfig: rawConfig,
447 DependsOn: dependsOn,
448 })
449 }
450
451 return result, nil
452}
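
// Illustrative example (not from the original source) of an output
// block, including the specially handled "depends_on" key; the names
// are hypothetical:
//
//     output "instance_ip" {
//       value      = "${aws_instance.web.private_ip}"
//       depends_on = ["aws_instance.web"]
//     }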
453
454	// loadVariablesHcl recurses into the given HCL object and turns
455// it into a list of variables.
456func loadVariablesHcl(list *ast.ObjectList) ([]*Variable, error) {
457 if err := assertAllBlocksHaveNames("variable", list); err != nil {
458 return nil, err
459 }
460
461 list = list.Children()
462
463 // hclVariable is the structure each variable is decoded into
464 type hclVariable struct {
465 DeclaredType string `hcl:"type"`
466 Default interface{}
467 Description string
468 Fields []string `hcl:",decodedFields"`
469 }
470
471 // Go through each object and turn it into an actual result.
472 result := make([]*Variable, 0, len(list.Items))
473 for _, item := range list.Items {
474 // Clean up items from JSON
475 unwrapHCLObjectKeysFromJSON(item, 1)
476
477 // Verify the keys
478 if len(item.Keys) != 1 {
479 return nil, fmt.Errorf(
480 "position %s: 'variable' must be followed by exactly one strings: a name",
481 item.Pos())
482 }
483
484 n := item.Keys[0].Token.Value().(string)
485 if !NameRegexp.MatchString(n) {
486 return nil, fmt.Errorf(
487 "position %s: 'variable' name must match regular expression: %s",
488 item.Pos(), NameRegexp)
489 }
490
491 // Check for invalid keys
492 valid := []string{"type", "default", "description"}
493 if err := checkHCLKeys(item.Val, valid); err != nil {
494 return nil, multierror.Prefix(err, fmt.Sprintf(
495 "variable[%s]:", n))
496 }
497
498 // Decode into hclVariable to get typed values
499 var hclVar hclVariable
500 if err := hcl.DecodeObject(&hclVar, item.Val); err != nil {
501 return nil, err
502 }
503
504 // Defaults turn into a slice of map[string]interface{} and
505 // we need to make sure to convert that down into the
506 // proper type for Config.
507 if ms, ok := hclVar.Default.([]map[string]interface{}); ok {
508 def := make(map[string]interface{})
509 for _, m := range ms {
510 for k, v := range m {
511 def[k] = v
512 }
513 }
514
515 hclVar.Default = def
516 }
517
518 // Build the new variable and do some basic validation
519 newVar := &Variable{
520 Name: n,
521 DeclaredType: hclVar.DeclaredType,
522 Default: hclVar.Default,
523 Description: hclVar.Description,
524 }
525 if err := newVar.ValidateTypeAndDefault(); err != nil {
526 return nil, err
527 }
528
529 result = append(result, newVar)
530 }
531
532 return result, nil
533}
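
// Illustrative example (not from the original source) of a variable
// block using the three valid keys checked above; the values are
// hypothetical:
//
//     variable "region" {
//       type        = "string"
//       default     = "us-east-1"
//       description = "AWS region to deploy into"
//     }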
534
535	// loadProvidersHcl recurses into the given HCL object and turns
536	// it into a list of provider configs.
537func loadProvidersHcl(list *ast.ObjectList) ([]*ProviderConfig, error) {
538 if err := assertAllBlocksHaveNames("provider", list); err != nil {
539 return nil, err
540 }
541
542 list = list.Children()
543 if len(list.Items) == 0 {
544 return nil, nil
545 }
546
547 // Go through each object and turn it into an actual result.
548 result := make([]*ProviderConfig, 0, len(list.Items))
549 for _, item := range list.Items {
550 n := item.Keys[0].Token.Value().(string)
551
552 var listVal *ast.ObjectList
553 if ot, ok := item.Val.(*ast.ObjectType); ok {
554 listVal = ot.List
555 } else {
556			return nil, fmt.Errorf("provider '%s': should be an object", n)
557 }
558
559 var config map[string]interface{}
560 if err := hcl.DecodeObject(&config, item.Val); err != nil {
561 return nil, err
562 }
563
564 delete(config, "alias")
565
566 rawConfig, err := NewRawConfig(config)
567 if err != nil {
568 return nil, fmt.Errorf(
569 "Error reading config for provider config %s: %s",
570 n,
571 err)
572 }
573
574 // If we have an alias field, then add those in
575 var alias string
576 if a := listVal.Filter("alias"); len(a.Items) > 0 {
577 err := hcl.DecodeObject(&alias, a.Items[0].Val)
578 if err != nil {
579 return nil, fmt.Errorf(
580 "Error reading alias for provider[%s]: %s",
581 n,
582 err)
583 }
584 }
585
586 result = append(result, &ProviderConfig{
587 Name: n,
588 Alias: alias,
589 RawConfig: rawConfig,
590 })
591 }
592
593 return result, nil
594}
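
// Illustrative example (not from the original source) of a provider
// block with the specially handled "alias" key; the values are
// hypothetical:
//
//     provider "aws" {
//       alias  = "west"
//       region = "us-west-2"
//     }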
595
596	// Given a handle to an HCL object, this recurses into the structure
597// and pulls out a list of data sources.
598//
599// The resulting data sources may not be unique, but each one
600// represents exactly one data definition in the HCL configuration.
601// We leave it up to another pass to merge them together.
602func loadDataResourcesHcl(list *ast.ObjectList) ([]*Resource, error) {
603 if err := assertAllBlocksHaveNames("data", list); err != nil {
604 return nil, err
605 }
606
607 list = list.Children()
608 if len(list.Items) == 0 {
609 return nil, nil
610 }
611
612 // Where all the results will go
613 var result []*Resource
614
615 // Now go over all the types and their children in order to get
616 // all of the actual resources.
617 for _, item := range list.Items {
618 if len(item.Keys) != 2 {
619 return nil, fmt.Errorf(
620 "position %s: 'data' must be followed by exactly two strings: a type and a name",
621 item.Pos())
622 }
623
624 t := item.Keys[0].Token.Value().(string)
625 k := item.Keys[1].Token.Value().(string)
626
627 var listVal *ast.ObjectList
628 if ot, ok := item.Val.(*ast.ObjectType); ok {
629 listVal = ot.List
630 } else {
631			return nil, fmt.Errorf("data source %s[%s]: should be an object", t, k)
632 }
633
634 var config map[string]interface{}
635 if err := hcl.DecodeObject(&config, item.Val); err != nil {
636 return nil, fmt.Errorf(
637 "Error reading config for %s[%s]: %s",
638 t,
639 k,
640 err)
641 }
642
643 // Remove the fields we handle specially
644 delete(config, "depends_on")
645 delete(config, "provider")
646 delete(config, "count")
647
648 rawConfig, err := NewRawConfig(config)
649 if err != nil {
650 return nil, fmt.Errorf(
651 "Error reading config for %s[%s]: %s",
652 t,
653 k,
654 err)
655 }
656
657 // If we have a count, then figure it out
658		count := "1"
659 if o := listVal.Filter("count"); len(o.Items) > 0 {
660 err = hcl.DecodeObject(&count, o.Items[0].Val)
661 if err != nil {
662 return nil, fmt.Errorf(
663 "Error parsing count for %s[%s]: %s",
664 t,
665 k,
666 err)
667 }
668 }
669 countConfig, err := NewRawConfig(map[string]interface{}{
670 "count": count,
671 })
672 if err != nil {
673 return nil, err
674 }
675 countConfig.Key = "count"
676
677 // If we have depends fields, then add those in
678 var dependsOn []string
679 if o := listVal.Filter("depends_on"); len(o.Items) > 0 {
680 err := hcl.DecodeObject(&dependsOn, o.Items[0].Val)
681 if err != nil {
682 return nil, fmt.Errorf(
683 "Error reading depends_on for %s[%s]: %s",
684 t,
685 k,
686 err)
687 }
688 }
689
690 // If we have a provider, then parse it out
691 var provider string
692 if o := listVal.Filter("provider"); len(o.Items) > 0 {
693 err := hcl.DecodeObject(&provider, o.Items[0].Val)
694 if err != nil {
695 return nil, fmt.Errorf(
696 "Error reading provider for %s[%s]: %s",
697 t,
698 k,
699 err)
700 }
701 }
702
703 result = append(result, &Resource{
704 Mode: DataResourceMode,
705 Name: k,
706 Type: t,
707 RawCount: countConfig,
708 RawConfig: rawConfig,
709 Provider: provider,
710 Provisioners: []*Provisioner{},
711 DependsOn: dependsOn,
712 Lifecycle: ResourceLifecycle{},
713 })
714 }
715
716 return result, nil
717}
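
// Illustrative example (not from the original source) of a data block
// using the specially handled "count" and "provider" keys; the data
// source and attributes are hypothetical:
//
//     data "aws_ami" "ubuntu" {
//       most_recent = true
//       count       = 1
//       provider    = "aws.west"
//     }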
718
719	// Given a handle to an HCL object, this recurses into the structure
720// and pulls out a list of managed resources.
721//
722// The resulting resources may not be unique, but each resource
723// represents exactly one "resource" block in the HCL configuration.
724// We leave it up to another pass to merge them together.
725func loadManagedResourcesHcl(list *ast.ObjectList) ([]*Resource, error) {
726 list = list.Children()
727 if len(list.Items) == 0 {
728 return nil, nil
729 }
730
731 // Where all the results will go
732 var result []*Resource
733
734 // Now go over all the types and their children in order to get
735 // all of the actual resources.
736 for _, item := range list.Items {
737 // GH-4385: We detect a pure provisioner resource and give the user
738 // an error about how to do it cleanly.
739 if len(item.Keys) == 4 && item.Keys[2].Token.Value().(string) == "provisioner" {
740 return nil, fmt.Errorf(
741 "position %s: provisioners in a resource should be wrapped in a list\n\n"+
742 "Example: \"provisioner\": [ { \"local-exec\": ... } ]",
743 item.Pos())
744 }
745
746 // Fix up JSON input
747 unwrapHCLObjectKeysFromJSON(item, 2)
748
749 if len(item.Keys) != 2 {
750 return nil, fmt.Errorf(
751 "position %s: resource must be followed by exactly two strings, a type and a name",
752 item.Pos())
753 }
754
755 t := item.Keys[0].Token.Value().(string)
756 k := item.Keys[1].Token.Value().(string)
757
758 var listVal *ast.ObjectList
759 if ot, ok := item.Val.(*ast.ObjectType); ok {
760 listVal = ot.List
761 } else {
762			return nil, fmt.Errorf("resource %s[%s]: should be an object", t, k)
763 }
764
765 var config map[string]interface{}
766 if err := hcl.DecodeObject(&config, item.Val); err != nil {
767 return nil, fmt.Errorf(
768 "Error reading config for %s[%s]: %s",
769 t,
770 k,
771 err)
772 }
773
774 // Remove the fields we handle specially
775 delete(config, "connection")
776 delete(config, "count")
777 delete(config, "depends_on")
778 delete(config, "provisioner")
779 delete(config, "provider")
780 delete(config, "lifecycle")
781
782 rawConfig, err := NewRawConfig(config)
783 if err != nil {
784 return nil, fmt.Errorf(
785 "Error reading config for %s[%s]: %s",
786 t,
787 k,
788 err)
789 }
790
791 // If we have a count, then figure it out
792		count := "1"
793 if o := listVal.Filter("count"); len(o.Items) > 0 {
794 err = hcl.DecodeObject(&count, o.Items[0].Val)
795 if err != nil {
796 return nil, fmt.Errorf(
797 "Error parsing count for %s[%s]: %s",
798 t,
799 k,
800 err)
801 }
802 }
803 countConfig, err := NewRawConfig(map[string]interface{}{
804 "count": count,
805 })
806 if err != nil {
807 return nil, err
808 }
809 countConfig.Key = "count"
810
811 // If we have depends fields, then add those in
812 var dependsOn []string
813 if o := listVal.Filter("depends_on"); len(o.Items) > 0 {
814 err := hcl.DecodeObject(&dependsOn, o.Items[0].Val)
815 if err != nil {
816 return nil, fmt.Errorf(
817 "Error reading depends_on for %s[%s]: %s",
818 t,
819 k,
820 err)
821 }
822 }
823
824 // If we have connection info, then parse those out
825 var connInfo map[string]interface{}
826 if o := listVal.Filter("connection"); len(o.Items) > 0 {
827 err := hcl.DecodeObject(&connInfo, o.Items[0].Val)
828 if err != nil {
829 return nil, fmt.Errorf(
830 "Error reading connection info for %s[%s]: %s",
831 t,
832 k,
833 err)
834 }
835 }
836
837 // If we have provisioners, then parse those out
838 var provisioners []*Provisioner
839 if os := listVal.Filter("provisioner"); len(os.Items) > 0 {
840 var err error
841 provisioners, err = loadProvisionersHcl(os, connInfo)
842 if err != nil {
843 return nil, fmt.Errorf(
844 "Error reading provisioners for %s[%s]: %s",
845 t,
846 k,
847 err)
848 }
849 }
850
851 // If we have a provider, then parse it out
852 var provider string
853 if o := listVal.Filter("provider"); len(o.Items) > 0 {
854 err := hcl.DecodeObject(&provider, o.Items[0].Val)
855 if err != nil {
856 return nil, fmt.Errorf(
857 "Error reading provider for %s[%s]: %s",
858 t,
859 k,
860 err)
861 }
862 }
863
864 // Check if the resource should be re-created before
865 // destroying the existing instance
866 var lifecycle ResourceLifecycle
867 if o := listVal.Filter("lifecycle"); len(o.Items) > 0 {
868 if len(o.Items) > 1 {
869 return nil, fmt.Errorf(
870 "%s[%s]: Multiple lifecycle blocks found, expected one",
871 t, k)
872 }
873
874 // Check for invalid keys
875 valid := []string{"create_before_destroy", "ignore_changes", "prevent_destroy"}
876 if err := checkHCLKeys(o.Items[0].Val, valid); err != nil {
877 return nil, multierror.Prefix(err, fmt.Sprintf(
878 "%s[%s]:", t, k))
879 }
880
881 var raw map[string]interface{}
882 if err = hcl.DecodeObject(&raw, o.Items[0].Val); err != nil {
883 return nil, fmt.Errorf(
884 "Error parsing lifecycle for %s[%s]: %s",
885 t,
886 k,
887 err)
888 }
889
890 if err := mapstructure.WeakDecode(raw, &lifecycle); err != nil {
891 return nil, fmt.Errorf(
892 "Error parsing lifecycle for %s[%s]: %s",
893 t,
894 k,
895 err)
896 }
897 }
898
899 result = append(result, &Resource{
900 Mode: ManagedResourceMode,
901 Name: k,
902 Type: t,
903 RawCount: countConfig,
904 RawConfig: rawConfig,
905 Provisioners: provisioners,
906 Provider: provider,
907 DependsOn: dependsOn,
908 Lifecycle: lifecycle,
909 })
910 }
911
912 return result, nil
913}
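
// Illustrative example (not from the original source) of a resource
// block exercising several of the specially handled keys; the values
// are hypothetical:
//
//     resource "aws_instance" "web" {
//       ami           = "ami-123456"
//       instance_type = "t2.micro"
//       count         = 2
//
//       lifecycle {
//         create_before_destroy = true
//       }
//     }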
914
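// loadProvisionersHcl recurses into the given HCL object and turns it
// into a list of provisioners, using the resource-level connection
// info as a default for each provisioner's own connection block.
//
// Illustrative example (hypothetical command) of a provisioner block
// with the specially handled "when" and "on_failure" keys:
//
//     provisioner "local-exec" {
//       command    = "./notify.sh"
//       when       = "destroy"
//       on_failure = "continue"
//     }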
915func loadProvisionersHcl(list *ast.ObjectList, connInfo map[string]interface{}) ([]*Provisioner, error) {
916 if err := assertAllBlocksHaveNames("provisioner", list); err != nil {
917 return nil, err
918 }
919
920 list = list.Children()
921 if len(list.Items) == 0 {
922 return nil, nil
923 }
924
925 // Go through each object and turn it into an actual result.
926 result := make([]*Provisioner, 0, len(list.Items))
927 for _, item := range list.Items {
928 n := item.Keys[0].Token.Value().(string)
929
930 var listVal *ast.ObjectList
931 if ot, ok := item.Val.(*ast.ObjectType); ok {
932 listVal = ot.List
933 } else {
934 return nil, fmt.Errorf("provisioner '%s': should be an object", n)
935 }
936
937 var config map[string]interface{}
938 if err := hcl.DecodeObject(&config, item.Val); err != nil {
939 return nil, err
940 }
941
942 // Parse the "when" value
943 when := ProvisionerWhenCreate
944 if v, ok := config["when"]; ok {
945 switch v {
946 case "create":
947 when = ProvisionerWhenCreate
948 case "destroy":
949 when = ProvisionerWhenDestroy
950 default:
951 return nil, fmt.Errorf(
952 "position %s: 'provisioner' when must be 'create' or 'destroy'",
953 item.Pos())
954 }
955 }
956
957 // Parse the "on_failure" value
958 onFailure := ProvisionerOnFailureFail
959 if v, ok := config["on_failure"]; ok {
960 switch v {
961 case "continue":
962 onFailure = ProvisionerOnFailureContinue
963 case "fail":
964 onFailure = ProvisionerOnFailureFail
965 default:
966 return nil, fmt.Errorf(
967 "position %s: 'provisioner' on_failure must be 'continue' or 'fail'",
968 item.Pos())
969 }
970 }
971
972 // Delete fields we special case
973 delete(config, "connection")
974 delete(config, "when")
975 delete(config, "on_failure")
976
977 rawConfig, err := NewRawConfig(config)
978 if err != nil {
979 return nil, err
980 }
981
982 // Check if we have a provisioner-level connection
983 // block that overrides the resource-level
984 var subConnInfo map[string]interface{}
985 if o := listVal.Filter("connection"); len(o.Items) > 0 {
986 err := hcl.DecodeObject(&subConnInfo, o.Items[0].Val)
987 if err != nil {
988 return nil, err
989 }
990 }
991
992 // Inherit from the resource connInfo any keys
993		// that are not explicitly overridden.
994 if connInfo != nil && subConnInfo != nil {
995 for k, v := range connInfo {
996 if _, ok := subConnInfo[k]; !ok {
997 subConnInfo[k] = v
998 }
999 }
1000 } else if subConnInfo == nil {
1001 subConnInfo = connInfo
1002 }
1003
1004 // Parse the connInfo
1005 connRaw, err := NewRawConfig(subConnInfo)
1006 if err != nil {
1007 return nil, err
1008 }
1009
1010 result = append(result, &Provisioner{
1011 Type: n,
1012 RawConfig: rawConfig,
1013 ConnInfo: connRaw,
1014 When: when,
1015 OnFailure: onFailure,
1016 })
1017 }
1018
1019 return result, nil
1020}
1021
1022/*
1023func hclObjectMap(os *hclobj.Object) map[string]ast.ListNode {
1024 objects := make(map[string][]*hclobj.Object)
1025
1026 for _, o := range os.Elem(false) {
1027 for _, elem := range o.Elem(true) {
1028 val, ok := objects[elem.Key]
1029 if !ok {
1030 val = make([]*hclobj.Object, 0, 1)
1031 }
1032
1033 val = append(val, elem)
1034 objects[elem.Key] = val
1035 }
1036 }
1037
1038 return objects
1039}
1040*/
1041
1042// assertAllBlocksHaveNames returns an error if any of the items in
1043// the given object list are blocks without keys (like "module {}")
1044// or simple assignments (like "module = 1"). It returns nil if
1045// neither of these things are true.
1046//
1047// The given name is used in any generated error messages, and should
1048// be the name of the block we're dealing with. The given list should
1049// be the result of calling .Filter on an object list with that same
1050// name.
1051func assertAllBlocksHaveNames(name string, list *ast.ObjectList) error {
1052 if elem := list.Elem(); len(elem.Items) != 0 {
1053 switch et := elem.Items[0].Val.(type) {
1054 case *ast.ObjectType:
1055 pos := et.Lbrace
1056 return fmt.Errorf("%s: %q must be followed by a name", pos, name)
1057 default:
1058 pos := elem.Items[0].Val.Pos()
1059 return fmt.Errorf("%s: %q must be a configuration block", pos, name)
1060 }
1061 }
1062 return nil
1063}
1064
1065func checkHCLKeys(node ast.Node, valid []string) error {
1066 var list *ast.ObjectList
1067 switch n := node.(type) {
1068 case *ast.ObjectList:
1069 list = n
1070 case *ast.ObjectType:
1071 list = n.List
1072 default:
1073 return fmt.Errorf("cannot check HCL keys of type %T", n)
1074 }
1075
1076 validMap := make(map[string]struct{}, len(valid))
1077 for _, v := range valid {
1078 validMap[v] = struct{}{}
1079 }
1080
1081 var result error
1082 for _, item := range list.Items {
1083 key := item.Keys[0].Token.Value().(string)
1084 if _, ok := validMap[key]; !ok {
1085 result = multierror.Append(result, fmt.Errorf(
1086 "invalid key: %s", key))
1087 }
1088 }
1089
1090 return result
1091}
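
// Usage sketch (illustrative, mirroring the call sites elsewhere in
// this file):
//
//     valid := []string{"create_before_destroy", "prevent_destroy"}
//     if err := checkHCLKeys(item.Val, valid); err != nil {
//         return multierror.Prefix(err, "lifecycle:")
//     }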
1092
1093// unwrapHCLObjectKeysFromJSON cleans up an edge case that can occur when
1094// parsing JSON as input: if we're parsing JSON then directly nested
1095// items will show up as additional "keys".
1096//
1097// For objects that expect a fixed number of keys, this breaks the
1098// decoding process. This function unwraps the object into what it would've
1099// looked like if it came directly from HCL by specifying the number of keys
1100// you expect.
1101//
1102// Example:
1103//
1104// { "foo": { "baz": {} } }
1105//
1106// Will show up with Keys being: []string{"foo", "baz"}
1107	// when, at a depth of 1, we really just want the first one. This function fixes this.
1108func unwrapHCLObjectKeysFromJSON(item *ast.ObjectItem, depth int) {
1109 if len(item.Keys) > depth && item.Keys[0].Token.JSON {
1110 for len(item.Keys) > depth {
1111 // Pop off the last key
1112 n := len(item.Keys)
1113 key := item.Keys[n-1]
1114 item.Keys[n-1] = nil
1115 item.Keys = item.Keys[:n-1]
1116
1117 // Wrap our value in a list
1118 item.Val = &ast.ObjectType{
1119 List: &ast.ObjectList{
1120 Items: []*ast.ObjectItem{
1121 &ast.ObjectItem{
1122 Keys: []*ast.ObjectKey{key},
1123 Val: item.Val,
1124 },
1125 },
1126 },
1127 }
1128 }
1129 }
1130}
diff --git a/vendor/github.com/hashicorp/terraform/config/merge.go b/vendor/github.com/hashicorp/terraform/config/merge.go
new file mode 100644
index 0000000..db214be
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/merge.go
@@ -0,0 +1,193 @@
1package config
2
3// Merge merges two configurations into a single configuration.
4//
5// Merge allows for the two configurations to have duplicate resources,
6// because the resources will be merged. This differs from a single
7// Config which must only have unique resources.
8func Merge(c1, c2 *Config) (*Config, error) {
9 c := new(Config)
10
11 // Merge unknown keys
12 unknowns := make(map[string]struct{})
13 for _, k := range c1.unknownKeys {
14 _, present := unknowns[k]
15 if !present {
16 unknowns[k] = struct{}{}
17 c.unknownKeys = append(c.unknownKeys, k)
18 }
19 }
20 for _, k := range c2.unknownKeys {
21 _, present := unknowns[k]
22 if !present {
23 unknowns[k] = struct{}{}
24 c.unknownKeys = append(c.unknownKeys, k)
25 }
26 }
27
28	// Merge Atlas configuration. This is a simple "one overrides the
29	// other" style of merge.
30 c.Atlas = c1.Atlas
31 if c2.Atlas != nil {
32 c.Atlas = c2.Atlas
33 }
34
35 // Merge the Terraform configuration
36 if c1.Terraform != nil {
37 c.Terraform = c1.Terraform
38 if c2.Terraform != nil {
39 c.Terraform.Merge(c2.Terraform)
40 }
41 } else {
42 c.Terraform = c2.Terraform
43 }
44
45 // NOTE: Everything below is pretty gross. Due to the lack of generics
46 // in Go, there is some hoop-jumping involved to make this merging a
47 // little more test-friendly and less repetitive. Ironically, making it
48 // less repetitive involves being a little repetitive, but I prefer to
49 // be repetitive with things that are less error prone than things that
50 // are more error prone (more logic). Type conversions to an interface
51 // are pretty low-error.
52
53 var m1, m2, mresult []merger
54
55 // Modules
56 m1 = make([]merger, 0, len(c1.Modules))
57 m2 = make([]merger, 0, len(c2.Modules))
58 for _, v := range c1.Modules {
59 m1 = append(m1, v)
60 }
61 for _, v := range c2.Modules {
62 m2 = append(m2, v)
63 }
64 mresult = mergeSlice(m1, m2)
65 if len(mresult) > 0 {
66 c.Modules = make([]*Module, len(mresult))
67 for i, v := range mresult {
68 c.Modules[i] = v.(*Module)
69 }
70 }
71
72 // Outputs
73 m1 = make([]merger, 0, len(c1.Outputs))
74 m2 = make([]merger, 0, len(c2.Outputs))
75 for _, v := range c1.Outputs {
76 m1 = append(m1, v)
77 }
78 for _, v := range c2.Outputs {
79 m2 = append(m2, v)
80 }
81 mresult = mergeSlice(m1, m2)
82 if len(mresult) > 0 {
83 c.Outputs = make([]*Output, len(mresult))
84 for i, v := range mresult {
85 c.Outputs[i] = v.(*Output)
86 }
87 }
88
89 // Provider Configs
90 m1 = make([]merger, 0, len(c1.ProviderConfigs))
91 m2 = make([]merger, 0, len(c2.ProviderConfigs))
92 for _, v := range c1.ProviderConfigs {
93 m1 = append(m1, v)
94 }
95 for _, v := range c2.ProviderConfigs {
96 m2 = append(m2, v)
97 }
98 mresult = mergeSlice(m1, m2)
99 if len(mresult) > 0 {
100 c.ProviderConfigs = make([]*ProviderConfig, len(mresult))
101 for i, v := range mresult {
102 c.ProviderConfigs[i] = v.(*ProviderConfig)
103 }
104 }
105
106 // Resources
107 m1 = make([]merger, 0, len(c1.Resources))
108 m2 = make([]merger, 0, len(c2.Resources))
109 for _, v := range c1.Resources {
110 m1 = append(m1, v)
111 }
112 for _, v := range c2.Resources {
113 m2 = append(m2, v)
114 }
115 mresult = mergeSlice(m1, m2)
116 if len(mresult) > 0 {
117 c.Resources = make([]*Resource, len(mresult))
118 for i, v := range mresult {
119 c.Resources[i] = v.(*Resource)
120 }
121 }
122
123 // Variables
124 m1 = make([]merger, 0, len(c1.Variables))
125 m2 = make([]merger, 0, len(c2.Variables))
126 for _, v := range c1.Variables {
127 m1 = append(m1, v)
128 }
129 for _, v := range c2.Variables {
130 m2 = append(m2, v)
131 }
132 mresult = mergeSlice(m1, m2)
133 if len(mresult) > 0 {
134 c.Variables = make([]*Variable, len(mresult))
135 for i, v := range mresult {
136 c.Variables[i] = v.(*Variable)
137 }
138 }
139
140 return c, nil
141}
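
// Example usage (illustrative; c1 and c2 stand for configurations
// loaded elsewhere):
//
//     merged, err := Merge(c1, c2)
//     if err != nil {
//         return err
//     }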
142
143// merger is an interface that must be implemented by types that are
144// merge-able. This simplifies the implementation of Merge for the various
145// components of a Config.
146type merger interface {
147 mergerName() string
148 mergerMerge(merger) merger
149}
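
// As an illustrative sketch (not the actual implementations, which
// live with each config type), a type satisfies merger along these
// lines:
//
//     func (m *Module) mergerName() string { return m.Name }
//
//     func (m *Module) mergerMerge(other merger) merger {
//         m2 := other.(*Module)
//         result := *m
//         if m2.Source != "" {
//             result.Source = m2.Source
//         }
//         return &result
//     }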
150
151// mergeSlice merges a slice of mergers.
152func mergeSlice(m1, m2 []merger) []merger {
153 r := make([]merger, len(m1), len(m1)+len(m2))
154 copy(r, m1)
155
156 m := map[string]struct{}{}
157 for _, v2 := range m2 {
158		// If we already saw it, just append it because it's a
159		// duplicate and invalid...
160 name := v2.mergerName()
161 if _, ok := m[name]; ok {
162 r = append(r, v2)
163 continue
164 }
165 m[name] = struct{}{}
166
167 // Find an original to override
168 var original merger
169 originalIndex := -1
170 for i, v := range m1 {
171 if v.mergerName() == name {
172 originalIndex = i
173 original = v
174 break
175 }
176 }
177
178 var v merger
179 if original == nil {
180 v = v2
181 } else {
182 v = original.mergerMerge(v2)
183 }
184
185 if originalIndex == -1 {
186 r = append(r, v)
187 } else {
188 r[originalIndex] = v
189 }
190 }
191
192 return r
193}
diff --git a/vendor/github.com/hashicorp/terraform/config/module/copy_dir.go b/vendor/github.com/hashicorp/terraform/config/module/copy_dir.go
new file mode 100644
index 0000000..095f61d
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/module/copy_dir.go
@@ -0,0 +1,114 @@
1package module
2
3import (
4 "io"
5 "os"
6 "path/filepath"
7 "strings"
8)
9
10// copyDir copies the src directory contents into dst. Both directories
11// should already exist.
12func copyDir(dst, src string) error {
13 src, err := filepath.EvalSymlinks(src)
14 if err != nil {
15 return err
16 }
17
18 walkFn := func(path string, info os.FileInfo, err error) error {
19 if err != nil {
20 return err
21 }
22
23 if path == src {
24 return nil
25 }
26
27 if strings.HasPrefix(filepath.Base(path), ".") {
28 // Skip any dot files
29 if info.IsDir() {
30 return filepath.SkipDir
31 } else {
32 return nil
33 }
34 }
35
36 // The "path" has the src prefixed to it. We need to join our
37 // destination with the path without the src on it.
38 dstPath := filepath.Join(dst, path[len(src):])
39
40		// We don't want to try to copy the same file over itself.
41 if eq, err := sameFile(path, dstPath); eq {
42 return nil
43 } else if err != nil {
44 return err
45 }
46
47 // If we have a directory, make that subdirectory, then continue
48 // the walk.
49 if info.IsDir() {
50 if path == filepath.Join(src, dst) {
51 // dst is in src; don't walk it.
52 return nil
53 }
54
55 if err := os.MkdirAll(dstPath, 0755); err != nil {
56 return err
57 }
58
59 return nil
60 }
61
62 // If we have a file, copy the contents.
63 srcF, err := os.Open(path)
64 if err != nil {
65 return err
66 }
67 defer srcF.Close()
68
69 dstF, err := os.Create(dstPath)
70 if err != nil {
71 return err
72 }
73 defer dstF.Close()
74
75 if _, err := io.Copy(dstF, srcF); err != nil {
76 return err
77 }
78
79 // Chmod it
80 return os.Chmod(dstPath, info.Mode())
81 }
82
83 return filepath.Walk(src, walkFn)
84}
85
86	// sameFile tries to determine if two paths are the same file.
87	// If the paths don't match, we look up the inode on supported systems.
88func sameFile(a, b string) (bool, error) {
89 if a == b {
90 return true, nil
91 }
92
93 aIno, err := inode(a)
94 if err != nil {
95 if os.IsNotExist(err) {
96 return false, nil
97 }
98 return false, err
99 }
100
101 bIno, err := inode(b)
102 if err != nil {
103 if os.IsNotExist(err) {
104 return false, nil
105 }
106 return false, err
107 }
108
109 if aIno > 0 && aIno == bIno {
110 return true, nil
111 }
112
113 return false, nil
114}
diff --git a/vendor/github.com/hashicorp/terraform/config/module/get.go b/vendor/github.com/hashicorp/terraform/config/module/get.go
new file mode 100644
index 0000000..96b4a63
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/module/get.go
@@ -0,0 +1,71 @@
1package module
2
3import (
4 "io/ioutil"
5 "os"
6
7 "github.com/hashicorp/go-getter"
8)
9
10// GetMode is an enum that describes how modules are loaded.
11//
12	// GetModeNone says that modules will not be downloaded or updated, they will
13// only be loaded from the storage.
14//
15// GetModeGet says that modules can be initially downloaded if they don't
16// exist, but otherwise to just load from the current version in storage.
17//
18// GetModeUpdate says that modules should be checked for updates and
19// downloaded prior to loading. If there are no updates, we load the version
20// from disk, otherwise we download first and then load.
21type GetMode byte
22
23const (
24 GetModeNone GetMode = iota
25 GetModeGet
26 GetModeUpdate
27)
28
29// GetCopy is the same as Get except that it downloads a copy of the
30// module represented by source.
31//
32	// This copy will omit any dot-prefixed files (such as .git/, .hg/) and
33// can't be updated on its own.
34func GetCopy(dst, src string) error {
35 // Create the temporary directory to do the real Get to
36 tmpDir, err := ioutil.TempDir("", "tf")
37 if err != nil {
38 return err
39 }
40 // FIXME: This isn't completely safe. Creating and removing our temp path
41 // exposes where to race to inject files.
42 if err := os.RemoveAll(tmpDir); err != nil {
43 return err
44 }
45 defer os.RemoveAll(tmpDir)
46
47 // Get to that temporary dir
48 if err := getter.Get(tmpDir, src); err != nil {
49 return err
50 }
51
52 // Make sure the destination exists
53 if err := os.MkdirAll(dst, 0755); err != nil {
54 return err
55 }
56
57 // Copy to the final location
58 return copyDir(dst, tmpDir)
59}
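
// Example usage (illustrative; the destination and source are
// hypothetical):
//
//     err := GetCopy("/tmp/module-copy", "git::https://example.com/network.git")
//     if err != nil {
//         return err
//     }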
60
61func getStorage(s getter.Storage, key string, src string, mode GetMode) (string, bool, error) {
62 // Get the module with the level specified if we were told to.
63 if mode > GetModeNone {
64 if err := s.Get(key, src, mode == GetModeUpdate); err != nil {
65 return "", false, err
66 }
67 }
68
69 // Get the directory where the module is.
70 return s.Dir(key)
71}
diff --git a/vendor/github.com/hashicorp/terraform/config/module/inode.go b/vendor/github.com/hashicorp/terraform/config/module/inode.go
new file mode 100644
index 0000000..8603ee2
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/module/inode.go
@@ -0,0 +1,21 @@
1// +build linux darwin openbsd netbsd solaris
2
3package module
4
5import (
6 "fmt"
7 "os"
8 "syscall"
9)
10
11	// look up the inode of a file on posix systems
12func inode(path string) (uint64, error) {
13 stat, err := os.Stat(path)
14 if err != nil {
15 return 0, err
16 }
17 if st, ok := stat.Sys().(*syscall.Stat_t); ok {
18 return st.Ino, nil
19 }
20 return 0, fmt.Errorf("could not determine file inode")
21}
diff --git a/vendor/github.com/hashicorp/terraform/config/module/inode_freebsd.go b/vendor/github.com/hashicorp/terraform/config/module/inode_freebsd.go
new file mode 100644
index 0000000..0d95730
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/module/inode_freebsd.go
@@ -0,0 +1,21 @@
1// +build freebsd
2
3package module
4
5import (
6 "fmt"
7 "os"
8 "syscall"
9)
10
11	// look up the inode of a file on posix systems
12func inode(path string) (uint64, error) {
13 stat, err := os.Stat(path)
14 if err != nil {
15 return 0, err
16 }
17 if st, ok := stat.Sys().(*syscall.Stat_t); ok {
18 return uint64(st.Ino), nil
19 }
20 return 0, fmt.Errorf("could not determine file inode")
21}
diff --git a/vendor/github.com/hashicorp/terraform/config/module/inode_windows.go b/vendor/github.com/hashicorp/terraform/config/module/inode_windows.go
new file mode 100644
index 0000000..c0cf455
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/module/inode_windows.go
@@ -0,0 +1,8 @@
1// +build windows
2
3package module
4
5// no syscall.Stat_t on windows, return 0 for inodes
6func inode(path string) (uint64, error) {
7 return 0, nil
8}
diff --git a/vendor/github.com/hashicorp/terraform/config/module/module.go b/vendor/github.com/hashicorp/terraform/config/module/module.go
new file mode 100644
index 0000000..f8649f6
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/module/module.go
@@ -0,0 +1,7 @@
1package module
2
3// Module represents the metadata for a single module.
4type Module struct {
5 Name string
6 Source string
7}
diff --git a/vendor/github.com/hashicorp/terraform/config/module/testing.go b/vendor/github.com/hashicorp/terraform/config/module/testing.go
new file mode 100644
index 0000000..fc9e733
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/module/testing.go
@@ -0,0 +1,38 @@
1package module
2
3import (
4 "io/ioutil"
5 "os"
6 "testing"
7
8 "github.com/hashicorp/go-getter"
9)
10
11// TestTree loads a module at the given path and returns the tree as well
12// as a function that should be deferred to clean up resources.
13func TestTree(t *testing.T, path string) (*Tree, func()) {
14 // Create a temporary directory for module storage
15 dir, err := ioutil.TempDir("", "tf")
16 if err != nil {
17 t.Fatalf("err: %s", err)
18 return nil, nil
19 }
20
21 // Load the module
22 mod, err := NewTreeModule("", path)
23 if err != nil {
24 t.Fatalf("err: %s", err)
25 return nil, nil
26 }
27
28 // Get the child modules
29 s := &getter.FolderStorage{StorageDir: dir}
30 if err := mod.Load(s, GetModeGet); err != nil {
31 t.Fatalf("err: %s", err)
32 return nil, nil
33 }
34
35 return mod, func() {
36 os.RemoveAll(dir)
37 }
38}
diff --git a/vendor/github.com/hashicorp/terraform/config/module/tree.go b/vendor/github.com/hashicorp/terraform/config/module/tree.go
new file mode 100644
index 0000000..b6f90fd
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/module/tree.go
@@ -0,0 +1,428 @@
1package module
2
3import (
4 "bufio"
5 "bytes"
6 "fmt"
7 "path/filepath"
8 "strings"
9 "sync"
10
11 "github.com/hashicorp/go-getter"
12 "github.com/hashicorp/terraform/config"
13)
14
15// RootName is the name of the root tree.
16const RootName = "root"
17
18// Tree represents the module import tree of configurations.
19//
20// This Tree structure can be used to get (download) new modules, load
21// all the modules without getting, flatten the tree into something
22// Terraform can use, etc.
23type Tree struct {
24 name string
25 config *config.Config
26 children map[string]*Tree
27 path []string
28 lock sync.RWMutex
29}
30
31// NewTree returns a new Tree for the given config structure.
32func NewTree(name string, c *config.Config) *Tree {
33 return &Tree{config: c, name: name}
34}
35
36// NewEmptyTree returns a new tree that is empty (contains no configuration).
37func NewEmptyTree() *Tree {
38 t := &Tree{config: &config.Config{}}
39
40 // We do this dummy load so that the tree is marked as "loaded". It
41	// should never fail because this is essentially a no-op. If it does
42	// fail, we panic so we know it's a bug.
43 if err := t.Load(nil, GetModeGet); err != nil {
44 panic(err)
45 }
46
47 return t
48}
49
50// NewTreeModule is like NewTree except it parses the configuration in
51// the directory and gives it a specific name. Use a blank name "" to specify
52// the root module.
53func NewTreeModule(name, dir string) (*Tree, error) {
54 c, err := config.LoadDir(dir)
55 if err != nil {
56 return nil, err
57 }
58
59 return NewTree(name, c), nil
60}
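
// A minimal usage sketch (illustrative; the directory and storage
// path are hypothetical):
//
//     tree, err := NewTreeModule("", "./infra")
//     if err != nil {
//         return err
//     }
//     s := &getter.FolderStorage{StorageDir: ".terraform/modules"}
//     if err := tree.Load(s, GetModeGet); err != nil {
//         return err
//     }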
61
62// Config returns the configuration for this module.
63func (t *Tree) Config() *config.Config {
64 return t.config
65}
66
67// Child returns the child with the given path (by name).
68func (t *Tree) Child(path []string) *Tree {
69 if t == nil {
70 return nil
71 }
72
73 if len(path) == 0 {
74 return t
75 }
76
77 c := t.Children()[path[0]]
78 if c == nil {
79 return nil
80 }
81
82 return c.Child(path[1:])
83}
84
85// Children returns the children of this tree (the modules that are
86// imported by this root).
87//
88// This will only return a non-nil value after Load is called.
89func (t *Tree) Children() map[string]*Tree {
90 t.lock.RLock()
91 defer t.lock.RUnlock()
92 return t.children
93}
94
95	// Loaded returns whether or not this tree has been loaded yet.
96func (t *Tree) Loaded() bool {
97 t.lock.RLock()
98 defer t.lock.RUnlock()
99 return t.children != nil
100}
101
102// Modules returns the list of modules that this tree imports.
103//
104// This is only the imports of _this_ level of the tree. To retrieve the
105// full nested imports, you'll have to traverse the tree.
106func (t *Tree) Modules() []*Module {
107 result := make([]*Module, len(t.config.Modules))
108 for i, m := range t.config.Modules {
109 result[i] = &Module{
110 Name: m.Name,
111 Source: m.Source,
112 }
113 }
114
115 return result
116}
117
118	// Name returns the name of the tree. This will be "root" for the root
119// tree and then the module name given for any children.
120func (t *Tree) Name() string {
121 if t.name == "" {
122 return RootName
123 }
124
125 return t.name
126}
127
128// Load loads the configuration of the entire tree.
129//
130// The parameters are used to tell the tree where to find modules and
131// whether it can download/update modules along the way.
132//
133// Calling this multiple times will reload the tree.
134//
135	// Various semantic checks are made while loading, since
136// module trees inherently require the configuration to be in a reasonably
137// sane state: no circular dependencies, proper module sources, etc. A full
138// suite of validations can be done by running Validate (after loading).
139func (t *Tree) Load(s getter.Storage, mode GetMode) error {
140 t.lock.Lock()
141 defer t.lock.Unlock()
142
143 // Reset the children if we have any
144 t.children = nil
145
146 modules := t.Modules()
147 children := make(map[string]*Tree)
148
149 // Go through all the modules and get the directory for them.
150 for _, m := range modules {
151 if _, ok := children[m.Name]; ok {
152 return fmt.Errorf(
153 "module %s: duplicated. module names must be unique", m.Name)
154 }
155
156 // Determine the path to this child
157 path := make([]string, len(t.path), len(t.path)+1)
158 copy(path, t.path)
159 path = append(path, m.Name)
160
161 // Split out the subdir if we have one
162 source, subDir := getter.SourceDirSubdir(m.Source)
163
164 source, err := getter.Detect(source, t.config.Dir, getter.Detectors)
165 if err != nil {
166 return fmt.Errorf("module %s: %s", m.Name, err)
167 }
168
169 // Check if the detector introduced something new.
170 source, subDir2 := getter.SourceDirSubdir(source)
171 if subDir2 != "" {
172 subDir = filepath.Join(subDir2, subDir)
173 }
174
175 // Get the directory where this module is so we can load it
176 key := strings.Join(path, ".")
177 key = fmt.Sprintf("root.%s-%s", key, m.Source)
178 dir, ok, err := getStorage(s, key, source, mode)
179 if err != nil {
180 return err
181 }
182 if !ok {
183 return fmt.Errorf(
184 "module %s: not found, may need to be downloaded using 'terraform get'", m.Name)
185 }
186
187 // If we have a subdirectory, then merge that in
188 if subDir != "" {
189 dir = filepath.Join(dir, subDir)
190 }
191
192		// Load the configuration for this module.
193 children[m.Name], err = NewTreeModule(m.Name, dir)
194 if err != nil {
195 return fmt.Errorf(
196 "module %s: %s", m.Name, err)
197 }
198
199 // Set the path of this child
200 children[m.Name].path = path
201 }
202
203 // Go through all the children and load them.
204 for _, c := range children {
205 if err := c.Load(s, mode); err != nil {
206 return err
207 }
208 }
209
210 // Set our tree up
211 t.children = children
212
213 return nil
214}
215
216// Path is the full path to this tree.
217func (t *Tree) Path() []string {
218 return t.path
219}
220
221// String gives a nice output to describe the tree.
222func (t *Tree) String() string {
223 var result bytes.Buffer
224 path := strings.Join(t.path, ", ")
225 if path != "" {
226 path = fmt.Sprintf(" (path: %s)", path)
227 }
228 result.WriteString(t.Name() + path + "\n")
229
230 cs := t.Children()
231 if cs == nil {
232 result.WriteString(" not loaded")
233 } else {
234 // Go through each child and get its string value, then indent it
235 // by two.
236 for _, c := range cs {
237 r := strings.NewReader(c.String())
238 scanner := bufio.NewScanner(r)
239 for scanner.Scan() {
240 result.WriteString(" ")
241 result.WriteString(scanner.Text())
242 result.WriteString("\n")
243 }
244 }
245 }
246
247 return result.String()
248}
249
250// Validate does semantic checks on the entire tree of configurations.
251//
252// This will call the respective config.Config.Validate() functions as well
253// as verifying things such as parameters/outputs between the various modules.
254//
255// Load must be called prior to calling Validate or an error will be returned.
256func (t *Tree) Validate() error {
257 if !t.Loaded() {
258 return fmt.Errorf("tree must be loaded before calling Validate")
259 }
260
261 // If something goes wrong, here is our error template
262 newErr := &treeError{Name: []string{t.Name()}}
263
264 // Terraform core does not handle root module children named "root".
265 // We plan to fix this in the future but this bug was brought up in
266 // the middle of a release and we don't want to introduce wide-sweeping
267 // changes at that time.
268 if len(t.path) == 1 && t.name == "root" {
269 return fmt.Errorf("root module cannot contain module named 'root'")
270 }
271
272 // Validate our configuration first.
273 if err := t.config.Validate(); err != nil {
274 newErr.Add(err)
275 }
276
277 // If we're the root, we do extra validation. This validation usually
278 // requires the entire tree (since children don't have parent pointers).
279 if len(t.path) == 0 {
280 if err := t.validateProviderAlias(); err != nil {
281 newErr.Add(err)
282 }
283 }
284
285 // Get the child trees
286 children := t.Children()
287
288 // Validate all our children
289 for _, c := range children {
290 err := c.Validate()
291 if err == nil {
292 continue
293 }
294
295 verr, ok := err.(*treeError)
296 if !ok {
297 // Unknown error, just return...
298 return err
299 }
300
301 // Append ourselves to the error and then return
302 verr.Name = append(verr.Name, t.Name())
303 newErr.AddChild(verr)
304 }
305
306 // Go over all the modules and verify that any parameters are valid
307 // variables into the module in question.
308 for _, m := range t.config.Modules {
309 tree, ok := children[m.Name]
310 if !ok {
311 // This should never happen because Load watches us
312 panic("module not found in children: " + m.Name)
313 }
314
315 // Build the variables that the module defines
316 requiredMap := make(map[string]struct{})
317 varMap := make(map[string]struct{})
318 for _, v := range tree.config.Variables {
319 varMap[v.Name] = struct{}{}
320
321 if v.Required() {
322 requiredMap[v.Name] = struct{}{}
323 }
324 }
325
326 // Compare to the keys in our raw config for the module
327		for k := range m.RawConfig.Raw {
328 if _, ok := varMap[k]; !ok {
329 newErr.Add(fmt.Errorf(
330 "module %s: %s is not a valid parameter",
331 m.Name, k))
332 }
333
334 // Remove the required
335 delete(requiredMap, k)
336 }
337
338 // If we have any required left over, they aren't set.
339		for k := range requiredMap {
340 newErr.Add(fmt.Errorf(
341 "module %s: required variable %q not set",
342 m.Name, k))
343 }
344 }
345
346 // Go over all the variables used and make sure that any module
347 // variables represent outputs properly.
348 for source, vs := range t.config.InterpolatedVariables() {
349 for _, v := range vs {
350 mv, ok := v.(*config.ModuleVariable)
351 if !ok {
352 continue
353 }
354
355 tree, ok := children[mv.Name]
356 if !ok {
357 newErr.Add(fmt.Errorf(
358 "%s: undefined module referenced %s",
359 source, mv.Name))
360 continue
361 }
362
363 found := false
364 for _, o := range tree.config.Outputs {
365 if o.Name == mv.Field {
366 found = true
367 break
368 }
369 }
370 if !found {
371 newErr.Add(fmt.Errorf(
372 "%s: %s is not a valid output for module %s",
373 source, mv.Field, mv.Name))
374 }
375 }
376 }
377
378 return newErr.ErrOrNil()
379}
380
381	// treeError is an error used by Tree.Validate to accumulate all
382	// validation errors.
383type treeError struct {
384 Name []string
385 Errs []error
386 Children []*treeError
387}
388
389func (e *treeError) Add(err error) {
390 e.Errs = append(e.Errs, err)
391}
392
393func (e *treeError) AddChild(err *treeError) {
394 e.Children = append(e.Children, err)
395}
396
397func (e *treeError) ErrOrNil() error {
398 if len(e.Errs) > 0 || len(e.Children) > 0 {
399 return e
400 }
401 return nil
402}
403
404func (e *treeError) Error() string {
405 name := strings.Join(e.Name, ".")
406 var out bytes.Buffer
407 fmt.Fprintf(&out, "module %s: ", name)
408
409 if len(e.Errs) == 1 {
410		// single-line error
411 out.WriteString(e.Errs[0].Error())
412 } else {
413 // multi-line error
414 for _, err := range e.Errs {
415 fmt.Fprintf(&out, "\n %s", err)
416 }
417 }
418
419 if len(e.Children) > 0 {
420 // start the next error on a new line
421 out.WriteString("\n ")
422 }
423 for _, child := range e.Children {
424 out.WriteString(child.Error())
425 }
426
427 return out.String()
428}
diff --git a/vendor/github.com/hashicorp/terraform/config/module/tree_gob.go b/vendor/github.com/hashicorp/terraform/config/module/tree_gob.go
new file mode 100644
index 0000000..fcd37f4
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/module/tree_gob.go
@@ -0,0 +1,57 @@
1package module
2
3import (
4 "bytes"
5 "encoding/gob"
6
7 "github.com/hashicorp/terraform/config"
8)
9
10func (t *Tree) GobDecode(bs []byte) error {
11 t.lock.Lock()
12 defer t.lock.Unlock()
13
14 // Decode the gob data
15 var data treeGob
16 dec := gob.NewDecoder(bytes.NewReader(bs))
17 if err := dec.Decode(&data); err != nil {
18 return err
19 }
20
21 // Set the fields
22 t.name = data.Name
23 t.config = data.Config
24 t.children = data.Children
25 t.path = data.Path
26
27 return nil
28}
29
30func (t *Tree) GobEncode() ([]byte, error) {
31 data := &treeGob{
32 Config: t.config,
33 Children: t.children,
34 Name: t.name,
35 Path: t.path,
36 }
37
38 var buf bytes.Buffer
39 enc := gob.NewEncoder(&buf)
40 if err := enc.Encode(data); err != nil {
41 return nil, err
42 }
43
44 return buf.Bytes(), nil
45}
46
47// treeGob is used as a structure to Gob encode a tree.
48//
49// This structure is private so it can't be referenced but the fields are
50// public, allowing Gob to properly encode this. When we decode this, we are
51// able to turn it into a Tree.
52type treeGob struct {
53 Config *config.Config
54 Children map[string]*Tree
55 Name string
56 Path []string
57}
diff --git a/vendor/github.com/hashicorp/terraform/config/module/validate_provider_alias.go b/vendor/github.com/hashicorp/terraform/config/module/validate_provider_alias.go
new file mode 100644
index 0000000..090d4f7
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/module/validate_provider_alias.go
@@ -0,0 +1,118 @@
1package module
2
3import (
4 "fmt"
5 "strings"
6
7 "github.com/hashicorp/go-multierror"
8 "github.com/hashicorp/terraform/dag"
9)
10
11// validateProviderAlias validates that all provider alias references are
12// defined at some point in the parent tree. This improves UX by catching
13// alias typos at the slight cost of requiring a declaration of usage. This
14// is usually a good tradeoff since not many aliases are used.
15func (t *Tree) validateProviderAlias() error {
16 // If we're not the root, don't perform this validation. We must be the
17	// root since we require full tree visibility.
18 if len(t.path) != 0 {
19 return nil
20 }
21
22 // We'll use a graph to keep track of defined aliases at each level.
23 // As long as a parent defines an alias, it is okay.
24 var g dag.AcyclicGraph
25 t.buildProviderAliasGraph(&g, nil)
26
27 // Go through the graph and check that the usage is all good.
28 var err error
29 for _, v := range g.Vertices() {
30 pv, ok := v.(*providerAliasVertex)
31 if !ok {
32 // This shouldn't happen, just ignore it.
33 continue
34 }
35
36 // If we're not using any aliases, fast track and just continue
37 if len(pv.Used) == 0 {
38 continue
39 }
40
41 // Grab the ancestors since we're going to have to check if our
42 // parents define any of our aliases.
43 var parents []*providerAliasVertex
44 ancestors, _ := g.Ancestors(v)
45 for _, raw := range ancestors.List() {
46 if pv, ok := raw.(*providerAliasVertex); ok {
47 parents = append(parents, pv)
48 }
49 }
50		for k := range pv.Used {
51 // Check if we define this
52 if _, ok := pv.Defined[k]; ok {
53 continue
54 }
55
56 // Check for a parent
57 found := false
58 for _, parent := range parents {
59 _, found = parent.Defined[k]
60 if found {
61 break
62 }
63 }
64 if found {
65 continue
66 }
67
68 // We didn't find the alias, error!
69 err = multierror.Append(err, fmt.Errorf(
70 "module %s: provider alias must be defined by the module or a parent: %s",
71 strings.Join(pv.Path, "."), k))
72 }
73 }
74
75 return err
76}
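
// Illustrative example (hypothetical provider and resource names): an
// alias defined in a parent module may be referenced by a child.
//
//     # defined in the parent module
//     provider "aws" {
//       alias  = "west"
//       region = "us-west-2"
//     }
//
//     # used by a resource in this or a child module
//     resource "aws_instance" "web" {
//       provider = "aws.west"
//     }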
77
78func (t *Tree) buildProviderAliasGraph(g *dag.AcyclicGraph, parent dag.Vertex) {
79 // Add all our defined aliases
80 defined := make(map[string]struct{})
81 for _, p := range t.config.ProviderConfigs {
82 defined[p.FullName()] = struct{}{}
83 }
84
85 // Add all our used aliases
86 used := make(map[string]struct{})
87 for _, r := range t.config.Resources {
88 if r.Provider != "" {
89 used[r.Provider] = struct{}{}
90 }
91 }
92
93 // Add it to the graph
94 vertex := &providerAliasVertex{
95 Path: t.Path(),
96 Defined: defined,
97 Used: used,
98 }
99 g.Add(vertex)
100
101 // Connect to our parent if we have one
102 if parent != nil {
103 g.Connect(dag.BasicEdge(vertex, parent))
104 }
105
106 // Build all our children
107 for _, c := range t.Children() {
108 c.buildProviderAliasGraph(g, vertex)
109 }
110}
111
112// providerAliasVertex is the vertex for the graph that keeps track of
113// defined provider aliases.
114type providerAliasVertex struct {
115 Path []string
116 Defined map[string]struct{}
117 Used map[string]struct{}
118}
diff --git a/vendor/github.com/hashicorp/terraform/config/provisioner_enums.go b/vendor/github.com/hashicorp/terraform/config/provisioner_enums.go
new file mode 100644
index 0000000..00fd43f
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/provisioner_enums.go
@@ -0,0 +1,40 @@
1package config
2
3// ProvisionerWhen is an enum for valid values for when to run provisioners.
4type ProvisionerWhen int
5
6const (
7 ProvisionerWhenInvalid ProvisionerWhen = iota
8 ProvisionerWhenCreate
9 ProvisionerWhenDestroy
10)
11
12var provisionerWhenStrs = map[ProvisionerWhen]string{
13 ProvisionerWhenInvalid: "invalid",
14 ProvisionerWhenCreate: "create",
15 ProvisionerWhenDestroy: "destroy",
16}
17
18func (v ProvisionerWhen) String() string {
19 return provisionerWhenStrs[v]
20}
21
22// ProvisionerOnFailure is an enum for valid values for on_failure options
23// for provisioners.
24type ProvisionerOnFailure int
25
26const (
27 ProvisionerOnFailureInvalid ProvisionerOnFailure = iota
28 ProvisionerOnFailureContinue
29 ProvisionerOnFailureFail
30)
31
32var provisionerOnFailureStrs = map[ProvisionerOnFailure]string{
33 ProvisionerOnFailureInvalid: "invalid",
34 ProvisionerOnFailureContinue: "continue",
35 ProvisionerOnFailureFail: "fail",
36}
37
38func (v ProvisionerOnFailure) String() string {
39 return provisionerOnFailureStrs[v]
40}
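
// Example (illustrative):
//
//     w := ProvisionerWhenDestroy
//     fmt.Println(w.String()) // prints "destroy"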
diff --git a/vendor/github.com/hashicorp/terraform/config/raw_config.go b/vendor/github.com/hashicorp/terraform/config/raw_config.go
new file mode 100644
index 0000000..f8498d8
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/raw_config.go
@@ -0,0 +1,335 @@
1package config
2
3import (
4 "bytes"
5 "encoding/gob"
6 "sync"
7
8 "github.com/hashicorp/hil"
9 "github.com/hashicorp/hil/ast"
10 "github.com/mitchellh/copystructure"
11 "github.com/mitchellh/reflectwalk"
12)
13
14// UnknownVariableValue is a sentinel value that can be used
15// to denote that the value of a variable is unknown at this time.
16// RawConfig uses this information to build up data about
17// unknown keys.
18const UnknownVariableValue = "74D93920-ED26-11E3-AC10-0800200C9A66"
19
20// RawConfig is a structure that holds a piece of configuration
21// where the overall structure is unknown since it will be used
22// to configure a plugin or some other similar external component.
23//
24// RawConfigs can be interpolated with variables that come from
25// other resources, user variables, etc.
26//
27// RawConfig supports a query-like interface to request
28// information from deep within the structure.
29type RawConfig struct {
30 Key string
31 Raw map[string]interface{}
32 Interpolations []ast.Node
33 Variables map[string]InterpolatedVariable
34
35 lock sync.Mutex
36 config map[string]interface{}
37 unknownKeys []string
38}
39
40// NewRawConfig creates a new RawConfig structure and populates the
41// publicly readable struct fields.
42func NewRawConfig(raw map[string]interface{}) (*RawConfig, error) {
43 result := &RawConfig{Raw: raw}
44 if err := result.init(); err != nil {
45 return nil, err
46 }
47
48 return result, nil
49}
50
51// RawMap returns a copy of the RawConfig.Raw map.
52func (r *RawConfig) RawMap() map[string]interface{} {
53 r.lock.Lock()
54 defer r.lock.Unlock()
55
56 m := make(map[string]interface{})
57 for k, v := range r.Raw {
58 m[k] = v
59 }
60 return m
61}
62
63// Copy returns a copy of this RawConfig, uninterpolated.
64func (r *RawConfig) Copy() *RawConfig {
65 if r == nil {
66 return nil
67 }
68
69 r.lock.Lock()
70 defer r.lock.Unlock()
71
72 newRaw := make(map[string]interface{})
73 for k, v := range r.Raw {
74 newRaw[k] = v
75 }
76
77 result, err := NewRawConfig(newRaw)
78 if err != nil {
79 panic("copy failed: " + err.Error())
80 }
81
82 result.Key = r.Key
83 return result
84}
85
86// Value returns the value of the configuration if this configuration
87// has a Key set. If this does not have a Key set, nil will be returned.
88func (r *RawConfig) Value() interface{} {
89 if c := r.Config(); c != nil {
90 if v, ok := c[r.Key]; ok {
91 return v
92 }
93 }
94
95 r.lock.Lock()
96 defer r.lock.Unlock()
97 return r.Raw[r.Key]
98}
99
100// Config returns the entire configuration with the variables
101// interpolated from any call to Interpolate.
102//
103// If any interpolated variables are unknown (value set to
104// UnknownVariableValue), the first non-container (map, slice, etc.) element
105// will be removed from the config. The keys of unknown variables
106// can be found using the UnknownKeys function.
107//
108// By pruning out unknown keys from the configuration, the raw
109// structure will always successfully decode into its ultimate
110// structure using something like mapstructure.
111func (r *RawConfig) Config() map[string]interface{} {
112 r.lock.Lock()
113 defer r.lock.Unlock()
114 return r.config
115}
116
117// Interpolate uses the given mapping of variable values and uses
118// those as the values to replace any variables in this raw
119// configuration.
120//
121// Any prior calls to Interpolate are replaced with this one.
122//
123// If a variable key is missing, this will panic.
124func (r *RawConfig) Interpolate(vs map[string]ast.Variable) error {
125 r.lock.Lock()
126 defer r.lock.Unlock()
127
128 config := langEvalConfig(vs)
129 return r.interpolate(func(root ast.Node) (interface{}, error) {
130 // None of the variables we need are computed, meaning we should
131 // be able to properly evaluate.
132 result, err := hil.Eval(root, config)
133 if err != nil {
134 return "", err
135 }
136
137 return result.Value, nil
138 })
139}
140
141// Merge merges another RawConfig into this one (overriding any conflicting
142// values in this config) and returns a new config. The original config
143// is not modified.
144func (r *RawConfig) Merge(other *RawConfig) *RawConfig {
145 r.lock.Lock()
146 defer r.lock.Unlock()
147
148 // Merge the raw configurations
149 raw := make(map[string]interface{})
150 for k, v := range r.Raw {
151 raw[k] = v
152 }
153 for k, v := range other.Raw {
154 raw[k] = v
155 }
156
157 // Create the result
158 result, err := NewRawConfig(raw)
159 if err != nil {
160 panic(err)
161 }
162
163 // Merge the interpolated results
164 result.config = make(map[string]interface{})
165 for k, v := range r.config {
166 result.config[k] = v
167 }
168 for k, v := range other.config {
169 result.config[k] = v
170 }
171
172 // Build the unknown keys
173 if len(r.unknownKeys) > 0 || len(other.unknownKeys) > 0 {
174 unknownKeys := make(map[string]struct{})
175 for _, k := range r.unknownKeys {
176 unknownKeys[k] = struct{}{}
177 }
178 for _, k := range other.unknownKeys {
179 unknownKeys[k] = struct{}{}
180 }
181
182 result.unknownKeys = make([]string, 0, len(unknownKeys))
183		for k := range unknownKeys {
184 result.unknownKeys = append(result.unknownKeys, k)
185 }
186 }
187
188 return result
189}
190
191func (r *RawConfig) init() error {
192 r.lock.Lock()
193 defer r.lock.Unlock()
194
195 r.config = r.Raw
196 r.Interpolations = nil
197 r.Variables = nil
198
199 fn := func(node ast.Node) (interface{}, error) {
200 r.Interpolations = append(r.Interpolations, node)
201 vars, err := DetectVariables(node)
202 if err != nil {
203 return "", err
204 }
205
206 for _, v := range vars {
207 if r.Variables == nil {
208 r.Variables = make(map[string]InterpolatedVariable)
209 }
210
211 r.Variables[v.FullKey()] = v
212 }
213
214 return "", nil
215 }
216
217 walker := &interpolationWalker{F: fn}
218 if err := reflectwalk.Walk(r.Raw, walker); err != nil {
219 return err
220 }
221
222 return nil
223}
224
225func (r *RawConfig) interpolate(fn interpolationWalkerFunc) error {
226 config, err := copystructure.Copy(r.Raw)
227 if err != nil {
228 return err
229 }
230 r.config = config.(map[string]interface{})
231
232 w := &interpolationWalker{F: fn, Replace: true}
233 err = reflectwalk.Walk(r.config, w)
234 if err != nil {
235 return err
236 }
237
238 r.unknownKeys = w.unknownKeys
239 return nil
240}
241
242func (r *RawConfig) merge(r2 *RawConfig) *RawConfig {
243 if r == nil && r2 == nil {
244 return nil
245 }
246
247 if r == nil {
248 r = &RawConfig{}
249 }
250
251 rawRaw, err := copystructure.Copy(r.Raw)
252 if err != nil {
253 panic(err)
254 }
255
256 raw := rawRaw.(map[string]interface{})
257 if r2 != nil {
258 for k, v := range r2.Raw {
259 raw[k] = v
260 }
261 }
262
263 result, err := NewRawConfig(raw)
264 if err != nil {
265 panic(err)
266 }
267
268 return result
269}
270
271// UnknownKeys returns the keys of the configuration that are unknown
272// because they had interpolated variables that must be computed.
273func (r *RawConfig) UnknownKeys() []string {
274 r.lock.Lock()
275 defer r.lock.Unlock()
276 return r.unknownKeys
277}
278
279// See GobEncode
280func (r *RawConfig) GobDecode(b []byte) error {
281 var data gobRawConfig
282 err := gob.NewDecoder(bytes.NewReader(b)).Decode(&data)
283 if err != nil {
284 return err
285 }
286
287 r.Key = data.Key
288 r.Raw = data.Raw
289
290 return r.init()
291}
292
293// GobEncode is a custom Gob encoder to use so that we only include the
294// raw configuration. Interpolated variables and such are lost and the
295// tree of interpolated variables is recomputed on decode, since it is
296// referentially transparent.
297func (r *RawConfig) GobEncode() ([]byte, error) {
298 r.lock.Lock()
299 defer r.lock.Unlock()
300
301 data := gobRawConfig{
302 Key: r.Key,
303 Raw: r.Raw,
304 }
305
306 var buf bytes.Buffer
307 if err := gob.NewEncoder(&buf).Encode(data); err != nil {
308 return nil, err
309 }
310
311 return buf.Bytes(), nil
312}
313
314type gobRawConfig struct {
315 Key string
316 Raw map[string]interface{}
317}
318
319// langEvalConfig returns the evaluation configuration we use to execute.
320func langEvalConfig(vs map[string]ast.Variable) *hil.EvalConfig {
321 funcMap := make(map[string]ast.Function)
322 for k, v := range Funcs() {
323 funcMap[k] = v
324 }
325 funcMap["lookup"] = interpolationFuncLookup(vs)
326 funcMap["keys"] = interpolationFuncKeys(vs)
327 funcMap["values"] = interpolationFuncValues(vs)
328
329 return &hil.EvalConfig{
330 GlobalScope: &ast.BasicScope{
331 VarMap: vs,
332 FuncMap: funcMap,
333 },
334 }
335}
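A minimal interpolation round trip through the API above; the variable name and value are illustrative:

	raw, err := config.NewRawConfig(map[string]interface{}{
		"ami": "${var.ami}",
	})
	if err != nil {
		log.Fatal(err)
	}

	vars := map[string]ast.Variable{
		"var.ami": {Type: ast.TypeString, Value: "ami-123456"},
	}
	if err := raw.Interpolate(vars); err != nil {
		log.Fatal(err)
	}

	fmt.Println(raw.Config()["ami"]) // "ami-123456"
	// Had the value been UnknownVariableValue instead, the key would be
	// pruned from Config() and reported by raw.UnknownKeys().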
diff --git a/vendor/github.com/hashicorp/terraform/config/resource_mode.go b/vendor/github.com/hashicorp/terraform/config/resource_mode.go
new file mode 100644
index 0000000..877c6e8
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/resource_mode.go
@@ -0,0 +1,9 @@
1package config
2
3//go:generate stringer -type=ResourceMode -output=resource_mode_string.go resource_mode.go
4type ResourceMode int
5
6const (
7 ManagedResourceMode ResourceMode = iota
8 DataResourceMode
9)
diff --git a/vendor/github.com/hashicorp/terraform/config/resource_mode_string.go b/vendor/github.com/hashicorp/terraform/config/resource_mode_string.go
new file mode 100644
index 0000000..ea68b4f
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/resource_mode_string.go
@@ -0,0 +1,16 @@
1// Code generated by "stringer -type=ResourceMode -output=resource_mode_string.go resource_mode.go"; DO NOT EDIT.
2
3package config
4
5import "fmt"
6
7const _ResourceMode_name = "ManagedResourceModeDataResourceMode"
8
9var _ResourceMode_index = [...]uint8{0, 19, 35}
10
11func (i ResourceMode) String() string {
12 if i < 0 || i >= ResourceMode(len(_ResourceMode_index)-1) {
13 return fmt.Sprintf("ResourceMode(%d)", i)
14 }
15 return _ResourceMode_name[_ResourceMode_index[i]:_ResourceMode_index[i+1]]
16}
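The generated method slices one shared string by offsets: _ResourceMode_name[0:19] is "ManagedResourceMode" and [19:35] is "DataResourceMode", with out-of-range values falling back to the fmt.Sprintf form. For example:

	fmt.Println(config.ManagedResourceMode) // "ManagedResourceMode"
	fmt.Println(config.ResourceMode(7))     // "ResourceMode(7)"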
diff --git a/vendor/github.com/hashicorp/terraform/config/testing.go b/vendor/github.com/hashicorp/terraform/config/testing.go
new file mode 100644
index 0000000..f7bfadd
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/testing.go
@@ -0,0 +1,15 @@
1package config
2
3import (
4 "testing"
5)
6
7// TestRawConfig is used to create a RawConfig for testing.
8func TestRawConfig(t *testing.T, c map[string]interface{}) *RawConfig {
9 cfg, err := NewRawConfig(c)
10 if err != nil {
11 t.Fatalf("err: %s", err)
12 }
13
14 return cfg
15}
diff --git a/vendor/github.com/hashicorp/terraform/dag/dag.go b/vendor/github.com/hashicorp/terraform/dag/dag.go
new file mode 100644
index 0000000..f8776bc
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/dag/dag.go
@@ -0,0 +1,286 @@
1package dag
2
3import (
4 "fmt"
5 "sort"
6 "strings"
7
8 "github.com/hashicorp/go-multierror"
9)
10
11// AcyclicGraph is a specialization of Graph that cannot have cycles. With
12// this property, we gain sane, well-defined graph traversal.
13type AcyclicGraph struct {
14 Graph
15}
16
17// WalkFunc is the callback used for walking the graph.
18type WalkFunc func(Vertex) error
19
20// DepthWalkFunc is a walk function that also receives the current depth of the
21// walk as an argument
22type DepthWalkFunc func(Vertex, int) error
23
24func (g *AcyclicGraph) DirectedGraph() Grapher {
25 return g
26}
27
28// Ancestors returns a Set that includes every Vertex reachable by walking
29// down from the provided starting Vertex v.
30func (g *AcyclicGraph) Ancestors(v Vertex) (*Set, error) {
31 s := new(Set)
32 start := AsVertexList(g.DownEdges(v))
33 memoFunc := func(v Vertex, d int) error {
34 s.Add(v)
35 return nil
36 }
37
38 if err := g.DepthFirstWalk(start, memoFunc); err != nil {
39 return nil, err
40 }
41
42 return s, nil
43}
44
45// Descendents returns a Set that includes every Vertex reachable by walking
46// up from the provided starting Vertex v.
47func (g *AcyclicGraph) Descendents(v Vertex) (*Set, error) {
48 s := new(Set)
49 start := AsVertexList(g.UpEdges(v))
50 memoFunc := func(v Vertex, d int) error {
51 s.Add(v)
52 return nil
53 }
54
55 if err := g.ReverseDepthFirstWalk(start, memoFunc); err != nil {
56 return nil, err
57 }
58
59 return s, nil
60}
61
62// Root returns the root of the DAG, or an error.
63//
64// Complexity: O(V)
65func (g *AcyclicGraph) Root() (Vertex, error) {
66 roots := make([]Vertex, 0, 1)
67 for _, v := range g.Vertices() {
68 if g.UpEdges(v).Len() == 0 {
69 roots = append(roots, v)
70 }
71 }
72
73 if len(roots) > 1 {
74 // TODO(mitchellh): make this error message a lot better
75 return nil, fmt.Errorf("multiple roots: %#v", roots)
76 }
77
78 if len(roots) == 0 {
79 return nil, fmt.Errorf("no roots found")
80 }
81
82 return roots[0], nil
83}
84
85// TransitiveReduction performs the transitive reduction of graph g in place.
86// The transitive reduction of a graph is a graph with as few edges as
87// possible with the same reachability as the original graph. This means
88// that if there are three nodes A => B => C, and A connects to both
89// B and C, and B connects to C, then the transitive reduction is the
90// same graph with only a single edge between A and B, and a single edge
91// between B and C.
92//
93// The graph must be valid for this operation to behave properly. If
94// Validate() returns an error, the behavior is undefined and the results
95// will likely be unexpected.
96//
97// Complexity: O(V(V+E)), or asymptotically O(VE)
98func (g *AcyclicGraph) TransitiveReduction() {
99 // For each vertex u in graph g, do a DFS starting from each vertex
100 // v such that the edge (u,v) exists (v is a direct descendant of u).
101 //
102 // For each v-prime reachable from v, remove the edge (u, v-prime).
103 defer g.debug.BeginOperation("TransitiveReduction", "").End("")
104
105 for _, u := range g.Vertices() {
106 uTargets := g.DownEdges(u)
107 vs := AsVertexList(g.DownEdges(u))
108
109 g.DepthFirstWalk(vs, func(v Vertex, d int) error {
110 shared := uTargets.Intersection(g.DownEdges(v))
111 for _, vPrime := range AsVertexList(shared) {
112 g.RemoveEdge(BasicEdge(u, vPrime))
113 }
114
115 return nil
116 })
117 }
118}
119
120// Validate validates the DAG. A DAG is valid if it has a single root
121// with no cycles.
122func (g *AcyclicGraph) Validate() error {
123 if _, err := g.Root(); err != nil {
124 return err
125 }
126
127 // Look for cycles of more than 1 component
128 var err error
129 cycles := g.Cycles()
130 if len(cycles) > 0 {
131 for _, cycle := range cycles {
132 cycleStr := make([]string, len(cycle))
133 for j, vertex := range cycle {
134 cycleStr[j] = VertexName(vertex)
135 }
136
137 err = multierror.Append(err, fmt.Errorf(
138 "Cycle: %s", strings.Join(cycleStr, ", ")))
139 }
140 }
141
142 // Look for cycles to self
143 for _, e := range g.Edges() {
144 if e.Source() == e.Target() {
145 err = multierror.Append(err, fmt.Errorf(
146 "Self reference: %s", VertexName(e.Source())))
147 }
148 }
149
150 return err
151}
152
153func (g *AcyclicGraph) Cycles() [][]Vertex {
154 var cycles [][]Vertex
155 for _, cycle := range StronglyConnected(&g.Graph) {
156 if len(cycle) > 1 {
157 cycles = append(cycles, cycle)
158 }
159 }
160 return cycles
161}
162
163// Walk walks the graph, calling your callback as each node is visited.
164// This will walk nodes in parallel if it can. Because the walk is done
165// in parallel, the error returned will be a multierror.
166func (g *AcyclicGraph) Walk(cb WalkFunc) error {
167 defer g.debug.BeginOperation(typeWalk, "").End("")
168
169 w := &Walker{Callback: cb, Reverse: true}
170 w.Update(g)
171 return w.Wait()
172}
173
174// AsVertexList is a simple convenience helper for converting a dag.Set to a []Vertex.
175func AsVertexList(s *Set) []Vertex {
176 rawList := s.List()
177 vertexList := make([]Vertex, len(rawList))
178 for i, raw := range rawList {
179 vertexList[i] = raw.(Vertex)
180 }
181 return vertexList
182}
183
184type vertexAtDepth struct {
185 Vertex Vertex
186 Depth int
187}
188
189// DepthFirstWalk does a depth-first walk of the graph starting from
190// the vertices in start, visiting each reachable vertex once and
191// passing its current depth to f.
192func (g *AcyclicGraph) DepthFirstWalk(start []Vertex, f DepthWalkFunc) error {
193 defer g.debug.BeginOperation(typeDepthFirstWalk, "").End("")
194
195 seen := make(map[Vertex]struct{})
196 frontier := make([]*vertexAtDepth, len(start))
197 for i, v := range start {
198 frontier[i] = &vertexAtDepth{
199 Vertex: v,
200 Depth: 0,
201 }
202 }
203 for len(frontier) > 0 {
204 // Pop the current vertex
205 n := len(frontier)
206 current := frontier[n-1]
207 frontier = frontier[:n-1]
208
209 // Check if we've seen this already and return...
210 if _, ok := seen[current.Vertex]; ok {
211 continue
212 }
213 seen[current.Vertex] = struct{}{}
214
215 // Visit the current node
216 if err := f(current.Vertex, current.Depth); err != nil {
217 return err
218 }
219
220 // Visit targets of this in a consistent order.
221 targets := AsVertexList(g.DownEdges(current.Vertex))
222 sort.Sort(byVertexName(targets))
223 for _, t := range targets {
224 frontier = append(frontier, &vertexAtDepth{
225 Vertex: t,
226 Depth: current.Depth + 1,
227 })
228 }
229 }
230
231 return nil
232}
233
234// ReverseDepthFirstWalk does a depth-first walk _up_ the graph starting from
235// the vertices in start.
236func (g *AcyclicGraph) ReverseDepthFirstWalk(start []Vertex, f DepthWalkFunc) error {
237 defer g.debug.BeginOperation(typeReverseDepthFirstWalk, "").End("")
238
239 seen := make(map[Vertex]struct{})
240 frontier := make([]*vertexAtDepth, len(start))
241 for i, v := range start {
242 frontier[i] = &vertexAtDepth{
243 Vertex: v,
244 Depth: 0,
245 }
246 }
247 for len(frontier) > 0 {
248 // Pop the current vertex
249 n := len(frontier)
250 current := frontier[n-1]
251 frontier = frontier[:n-1]
252
253 // Check if we've seen this already and return...
254 if _, ok := seen[current.Vertex]; ok {
255 continue
256 }
257 seen[current.Vertex] = struct{}{}
258
259 // Add next set of targets in a consistent order.
260 targets := AsVertexList(g.UpEdges(current.Vertex))
261 sort.Sort(byVertexName(targets))
262 for _, t := range targets {
263 frontier = append(frontier, &vertexAtDepth{
264 Vertex: t,
265 Depth: current.Depth + 1,
266 })
267 }
268
269 // Visit the current node
270 if err := f(current.Vertex, current.Depth); err != nil {
271 return err
272 }
273 }
274
275 return nil
276}
277
278// byVertexName implements sort.Interface so a list of Vertices can be sorted
279// consistently by their VertexName
280type byVertexName []Vertex
281
282func (b byVertexName) Len() int { return len(b) }
283func (b byVertexName) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
284func (b byVertexName) Less(i, j int) bool {
285 return VertexName(b[i]) < VertexName(b[j])
286}
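A small end-to-end sketch of the types above: build a graph, validate it, then walk it. Per the Walker documentation later in this diff, Walk runs with Reverse set, so the targets of an edge complete before its source; the vertex values here are illustrative:

	var g dag.AcyclicGraph
	g.Add("root")
	g.Add("a")
	g.Add("b")
	g.Connect(dag.BasicEdge("root", "a"))
	g.Connect(dag.BasicEdge("root", "b"))

	if err := g.Validate(); err != nil { // exactly one root, no cycles
		log.Fatal(err)
	}

	err := g.Walk(func(v dag.Vertex) error {
		fmt.Println(dag.VertexName(v)) // "a" and "b" print before "root"
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}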
diff --git a/vendor/github.com/hashicorp/terraform/dag/dot.go b/vendor/github.com/hashicorp/terraform/dag/dot.go
new file mode 100644
index 0000000..7e6d2af
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/dag/dot.go
@@ -0,0 +1,282 @@
1package dag
2
3import (
4 "bytes"
5 "fmt"
6 "sort"
7 "strings"
8)
9
10// DotOpts are the options for generating a dot formatted Graph.
11type DotOpts struct {
12 // Allows some nodes to decide to only show themselves when the user has
13 // requested the "verbose" graph.
14 Verbose bool
15
16 // Highlight Cycles
17 DrawCycles bool
18
19 // How many levels to expand modules as we draw
20 MaxDepth int
21
22 // use this to keep the cluster_ naming convention from the previous dot writer
23 cluster bool
24}
25
26// GraphNodeDotter can be implemented by a node to cause it to be included
27// in the dot graph. The DotNode method will be called, which is expected
28// to return a representation of this node.
29type GraphNodeDotter interface {
30	// DotNode is called to return the dot formatting for the node.
31 // The first parameter is the title of the node.
32 // The second parameter includes user-specified options that affect the dot
33	// graph. See DotOpts above for details.
34 DotNode(string, *DotOpts) *DotNode
35}
36
37// DotNode provides a structure for Vertices to return in order to specify their
38// dot format.
39type DotNode struct {
40 Name string
41 Attrs map[string]string
42}
43
44// Returns the DOT representation of this Graph.
45func (g *marshalGraph) Dot(opts *DotOpts) []byte {
46 if opts == nil {
47 opts = &DotOpts{
48 DrawCycles: true,
49 MaxDepth: -1,
50 Verbose: true,
51 }
52 }
53
54 var w indentWriter
55 w.WriteString("digraph {\n")
56 w.Indent()
57
58 // some dot defaults
59 w.WriteString(`compound = "true"` + "\n")
60 w.WriteString(`newrank = "true"` + "\n")
61
62 // the top level graph is written as the first subgraph
63 w.WriteString(`subgraph "root" {` + "\n")
64 g.writeBody(opts, &w)
65
66 // cluster isn't really used other than for naming purposes in some graphs
67 opts.cluster = opts.MaxDepth != 0
68 maxDepth := opts.MaxDepth
69 if maxDepth == 0 {
70 maxDepth = -1
71 }
72
73 for _, s := range g.Subgraphs {
74 g.writeSubgraph(s, opts, maxDepth, &w)
75 }
76
77 w.Unindent()
78 w.WriteString("}\n")
79 return w.Bytes()
80}
81
82func (v *marshalVertex) dot(g *marshalGraph, opts *DotOpts) []byte {
83 var buf bytes.Buffer
84 graphName := g.Name
85 if graphName == "" {
86 graphName = "root"
87 }
88
89 name := v.Name
90 attrs := v.Attrs
91 if v.graphNodeDotter != nil {
92 node := v.graphNodeDotter.DotNode(name, opts)
93 if node == nil {
94 return []byte{}
95 }
96
97 newAttrs := make(map[string]string)
98 for k, v := range attrs {
99 newAttrs[k] = v
100 }
101 for k, v := range node.Attrs {
102 newAttrs[k] = v
103 }
104
105 name = node.Name
106 attrs = newAttrs
107 }
108
109 buf.WriteString(fmt.Sprintf(`"[%s] %s"`, graphName, name))
110 writeAttrs(&buf, attrs)
111 buf.WriteByte('\n')
112
113 return buf.Bytes()
114}
115
116func (e *marshalEdge) dot(g *marshalGraph) string {
117 var buf bytes.Buffer
118 graphName := g.Name
119 if graphName == "" {
120 graphName = "root"
121 }
122
123 sourceName := g.vertexByID(e.Source).Name
124 targetName := g.vertexByID(e.Target).Name
125 s := fmt.Sprintf(`"[%s] %s" -> "[%s] %s"`, graphName, sourceName, graphName, targetName)
126 buf.WriteString(s)
127 writeAttrs(&buf, e.Attrs)
128
129 return buf.String()
130}
131
132func cycleDot(e *marshalEdge, g *marshalGraph) string {
133 return e.dot(g) + ` [color = "red", penwidth = "2.0"]`
134}
135
136// writeSubgraph writes the subgraph body. This is recursive, and the depth
137// argument limits how many levels of nesting remain to be written.
138func (g *marshalGraph) writeSubgraph(sg *marshalGraph, opts *DotOpts, depth int, w *indentWriter) {
139 if depth == 0 {
140 return
141 }
142 depth--
143
144 name := sg.Name
145 if opts.cluster {
146 // we prefix with cluster_ to match the old dot output
147 name = "cluster_" + name
148 sg.Attrs["label"] = sg.Name
149 }
150 w.WriteString(fmt.Sprintf("subgraph %q {\n", name))
151 sg.writeBody(opts, w)
152
153 for _, sg := range sg.Subgraphs {
154 g.writeSubgraph(sg, opts, depth, w)
155 }
156}
157
158func (g *marshalGraph) writeBody(opts *DotOpts, w *indentWriter) {
159 w.Indent()
160
161 for _, as := range attrStrings(g.Attrs) {
162 w.WriteString(as + "\n")
163 }
164
165 // list of Vertices that aren't to be included in the dot output
166 skip := map[string]bool{}
167
168 for _, v := range g.Vertices {
169 if v.graphNodeDotter == nil {
170 skip[v.ID] = true
171 continue
172 }
173
174 w.Write(v.dot(g, opts))
175 }
176
177 var dotEdges []string
178
179 if opts.DrawCycles {
180 for _, c := range g.Cycles {
181 if len(c) < 2 {
182 continue
183 }
184
185 for i, j := 0, 1; i < len(c); i, j = i+1, j+1 {
186 if j >= len(c) {
187 j = 0
188 }
189 src := c[i]
190 tgt := c[j]
191
192 if skip[src.ID] || skip[tgt.ID] {
193 continue
194 }
195
196 e := &marshalEdge{
197 Name: fmt.Sprintf("%s|%s", src.Name, tgt.Name),
198 Source: src.ID,
199 Target: tgt.ID,
200 Attrs: make(map[string]string),
201 }
202
203 dotEdges = append(dotEdges, cycleDot(e, g))
204 src = tgt
205 }
206 }
207 }
208
209 for _, e := range g.Edges {
210 dotEdges = append(dotEdges, e.dot(g))
211 }
212
213	// sort these again to match the old output
214 sort.Strings(dotEdges)
215
216 for _, e := range dotEdges {
217 w.WriteString(e + "\n")
218 }
219
220 w.Unindent()
221 w.WriteString("}\n")
222}
223
224func writeAttrs(buf *bytes.Buffer, attrs map[string]string) {
225 if len(attrs) > 0 {
226 buf.WriteString(" [")
227 buf.WriteString(strings.Join(attrStrings(attrs), ", "))
228 buf.WriteString("]")
229 }
230}
231
232func attrStrings(attrs map[string]string) []string {
233 strings := make([]string, 0, len(attrs))
234 for k, v := range attrs {
235 strings = append(strings, fmt.Sprintf("%s = %q", k, v))
236 }
237 sort.Strings(strings)
238 return strings
239}
240
241// indentWriter provides a bytes.Buffer-like structure that indents when
242// starting a new line.
243type indentWriter struct {
244 bytes.Buffer
245 level int
246}
247
248func (w *indentWriter) indent() {
249 newline := []byte("\n")
250 if !bytes.HasSuffix(w.Bytes(), newline) {
251 return
252 }
253 for i := 0; i < w.level; i++ {
254 w.Buffer.WriteString("\t")
255 }
256}
257
258// Indent increases indentation by 1
259func (w *indentWriter) Indent() { w.level++ }
260
261// Unindent decreases indentation by 1
262func (w *indentWriter) Unindent() { w.level-- }
263
264// The following methods intercept the bytes.Buffer writes and insert the
265// indentation when starting a new line.
266func (w *indentWriter) Write(b []byte) (int, error) {
267 w.indent()
268 return w.Buffer.Write(b)
269}
270
271func (w *indentWriter) WriteString(s string) (int, error) {
272 w.indent()
273 return w.Buffer.WriteString(s)
274}
275func (w *indentWriter) WriteByte(b byte) error {
276 w.indent()
277 return w.Buffer.WriteByte(b)
278}
279func (w *indentWriter) WriteRune(r rune) (int, error) {
280 w.indent()
281 return w.Buffer.WriteRune(r)
282}
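Rendering is normally driven through Graph.Dot (defined in graph.go later in this diff), which wraps the marshal types above. Note that writeBody skips any vertex whose type does not implement GraphNodeDotter. A sketch, passing options that match the defaults applied when opts is nil:

	out := g.Dot(&dag.DotOpts{
		DrawCycles: true,
		MaxDepth:   -1,
		Verbose:    true,
	})
	os.Stdout.Write(out)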
diff --git a/vendor/github.com/hashicorp/terraform/dag/edge.go b/vendor/github.com/hashicorp/terraform/dag/edge.go
new file mode 100644
index 0000000..f0d99ee
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/dag/edge.go
@@ -0,0 +1,37 @@
1package dag
2
3import (
4 "fmt"
5)
6
7// Edge represents an edge in the graph, with a source and target vertex.
8type Edge interface {
9 Source() Vertex
10 Target() Vertex
11
12 Hashable
13}
14
15// BasicEdge returns an Edge implementation that simply tracks the source
16// and target given as-is.
17func BasicEdge(source, target Vertex) Edge {
18 return &basicEdge{S: source, T: target}
19}
20
21// basicEdge is a basic implementation of Edge that has the source and
22// target vertex.
23type basicEdge struct {
24 S, T Vertex
25}
26
27func (e *basicEdge) Hashcode() interface{} {
28 return fmt.Sprintf("%p-%p", e.S, e.T)
29}
30
31func (e *basicEdge) Source() Vertex {
32 return e.S
33}
34
35func (e *basicEdge) Target() Vertex {
36 return e.T
37}
diff --git a/vendor/github.com/hashicorp/terraform/dag/graph.go b/vendor/github.com/hashicorp/terraform/dag/graph.go
new file mode 100644
index 0000000..e7517a2
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/dag/graph.go
@@ -0,0 +1,391 @@
1package dag
2
3import (
4 "bytes"
5 "encoding/json"
6 "fmt"
7 "io"
8 "sort"
9)
10
11// Graph is used to represent a dependency graph.
12type Graph struct {
13 vertices *Set
14 edges *Set
15 downEdges map[interface{}]*Set
16 upEdges map[interface{}]*Set
17
18 // JSON encoder for recording debug information
19 debug *encoder
20}
21
22// Subgrapher allows a Vertex to be a Graph itself, by returning a Grapher.
23type Subgrapher interface {
24 Subgraph() Grapher
25}
26
27// A Grapher is any type that can return the directed graph it represents;
28// it is mainly used to identify dag.Graph and dag.AcyclicGraph, which
29// return themselves.
30type Grapher interface {
31 DirectedGraph() Grapher
32}
33
34// Vertex of the graph.
35type Vertex interface{}
36
37// NamedVertex is an optional interface that can be implemented by Vertex
38// to give it a human-friendly name that is used for outputting the graph.
39type NamedVertex interface {
40 Vertex
41 Name() string
42}
43
44func (g *Graph) DirectedGraph() Grapher {
45 return g
46}
47
48// Vertices returns the list of all the vertices in the graph.
49func (g *Graph) Vertices() []Vertex {
50 list := g.vertices.List()
51 result := make([]Vertex, len(list))
52 for i, v := range list {
53 result[i] = v.(Vertex)
54 }
55
56 return result
57}
58
59// Edges returns the list of all the edges in the graph.
60func (g *Graph) Edges() []Edge {
61 list := g.edges.List()
62 result := make([]Edge, len(list))
63 for i, v := range list {
64 result[i] = v.(Edge)
65 }
66
67 return result
68}
69
70// EdgesFrom returns the list of edges from the given source.
71func (g *Graph) EdgesFrom(v Vertex) []Edge {
72 var result []Edge
73 from := hashcode(v)
74 for _, e := range g.Edges() {
75 if hashcode(e.Source()) == from {
76 result = append(result, e)
77 }
78 }
79
80 return result
81}
82
83// EdgesTo returns the list of edges to the given target.
84func (g *Graph) EdgesTo(v Vertex) []Edge {
85 var result []Edge
86 search := hashcode(v)
87 for _, e := range g.Edges() {
88 if hashcode(e.Target()) == search {
89 result = append(result, e)
90 }
91 }
92
93 return result
94}
95
96// HasVertex checks if the given Vertex is present in the graph.
97func (g *Graph) HasVertex(v Vertex) bool {
98 return g.vertices.Include(v)
99}
100
101// HasEdge checks if the given Edge is present in the graph.
102func (g *Graph) HasEdge(e Edge) bool {
103 return g.edges.Include(e)
104}
105
106// Add adds a vertex to the graph. This is safe to call multiple times with
107// the same Vertex.
108func (g *Graph) Add(v Vertex) Vertex {
109 g.init()
110 g.vertices.Add(v)
111 g.debug.Add(v)
112 return v
113}
114
115// Remove removes a vertex from the graph. This will also remove any
116// edges with this vertex as a source or target.
117func (g *Graph) Remove(v Vertex) Vertex {
118 // Delete the vertex itself
119 g.vertices.Delete(v)
120 g.debug.Remove(v)
121
122 // Delete the edges to non-existent things
123 for _, target := range g.DownEdges(v).List() {
124 g.RemoveEdge(BasicEdge(v, target))
125 }
126 for _, source := range g.UpEdges(v).List() {
127 g.RemoveEdge(BasicEdge(source, v))
128 }
129
130 return nil
131}
132
133// Replace replaces the original Vertex with replacement. If the original
134// does not exist within the graph, then false is returned. Otherwise, true
135// is returned.
136func (g *Graph) Replace(original, replacement Vertex) bool {
137 // If we don't have the original, we can't do anything
138 if !g.vertices.Include(original) {
139 return false
140 }
141
142 defer g.debug.BeginOperation("Replace", "").End("")
143
144 // If they're the same, then don't do anything
145 if original == replacement {
146 return true
147 }
148
149 // Add our new vertex, then copy all the edges
150 g.Add(replacement)
151 for _, target := range g.DownEdges(original).List() {
152 g.Connect(BasicEdge(replacement, target))
153 }
154 for _, source := range g.UpEdges(original).List() {
155 g.Connect(BasicEdge(source, replacement))
156 }
157
158 // Remove our old vertex, which will also remove all the edges
159 g.Remove(original)
160
161 return true
162}
163
164// RemoveEdge removes an edge from the graph.
165func (g *Graph) RemoveEdge(edge Edge) {
166 g.init()
167 g.debug.RemoveEdge(edge)
168
169 // Delete the edge from the set
170 g.edges.Delete(edge)
171
172 // Delete the up/down edges
173 if s, ok := g.downEdges[hashcode(edge.Source())]; ok {
174 s.Delete(edge.Target())
175 }
176 if s, ok := g.upEdges[hashcode(edge.Target())]; ok {
177 s.Delete(edge.Source())
178 }
179}
180
181// DownEdges returns the outward edges from the source Vertex v.
182func (g *Graph) DownEdges(v Vertex) *Set {
183 g.init()
184 return g.downEdges[hashcode(v)]
185}
186
187// UpEdges returns the inward edges to the destination Vertex v.
188func (g *Graph) UpEdges(v Vertex) *Set {
189 g.init()
190 return g.upEdges[hashcode(v)]
191}
192
193// Connect adds an edge with the given source and target. This is safe to
194// call multiple times with the same value. Note that the same value is
195// verified through pointer equality of the vertices, not through the
196// value of the edge itself.
197func (g *Graph) Connect(edge Edge) {
198 g.init()
199 g.debug.Connect(edge)
200
201 source := edge.Source()
202 target := edge.Target()
203 sourceCode := hashcode(source)
204 targetCode := hashcode(target)
205
206 // Do we have this already? If so, don't add it again.
207 if s, ok := g.downEdges[sourceCode]; ok && s.Include(target) {
208 return
209 }
210
211 // Add the edge to the set
212 g.edges.Add(edge)
213
214 // Add the down edge
215 s, ok := g.downEdges[sourceCode]
216 if !ok {
217 s = new(Set)
218 g.downEdges[sourceCode] = s
219 }
220 s.Add(target)
221
222 // Add the up edge
223 s, ok = g.upEdges[targetCode]
224 if !ok {
225 s = new(Set)
226 g.upEdges[targetCode] = s
227 }
228 s.Add(source)
229}
230
231// StringWithNodeTypes outputs human-friendly output for the graph structure, including the Go type of each node.
232func (g *Graph) StringWithNodeTypes() string {
233 var buf bytes.Buffer
234
235 // Build the list of node names and a mapping so that we can more
236 // easily alphabetize the output to remain deterministic.
237 vertices := g.Vertices()
238 names := make([]string, 0, len(vertices))
239 mapping := make(map[string]Vertex, len(vertices))
240 for _, v := range vertices {
241 name := VertexName(v)
242 names = append(names, name)
243 mapping[name] = v
244 }
245 sort.Strings(names)
246
247 // Write each node in order...
248 for _, name := range names {
249 v := mapping[name]
250 targets := g.downEdges[hashcode(v)]
251
252 buf.WriteString(fmt.Sprintf("%s - %T\n", name, v))
253
254 // Alphabetize dependencies
255 deps := make([]string, 0, targets.Len())
256 targetNodes := make(map[string]Vertex)
257 for _, target := range targets.List() {
258 dep := VertexName(target)
259 deps = append(deps, dep)
260 targetNodes[dep] = target
261 }
262 sort.Strings(deps)
263
264 // Write dependencies
265 for _, d := range deps {
266 buf.WriteString(fmt.Sprintf(" %s - %T\n", d, targetNodes[d]))
267 }
268 }
269
270 return buf.String()
271}
272
273// String outputs a human-friendly representation of the graph structure.
274func (g *Graph) String() string {
275 var buf bytes.Buffer
276
277 // Build the list of node names and a mapping so that we can more
278 // easily alphabetize the output to remain deterministic.
279 vertices := g.Vertices()
280 names := make([]string, 0, len(vertices))
281 mapping := make(map[string]Vertex, len(vertices))
282 for _, v := range vertices {
283 name := VertexName(v)
284 names = append(names, name)
285 mapping[name] = v
286 }
287 sort.Strings(names)
288
289 // Write each node in order...
290 for _, name := range names {
291 v := mapping[name]
292 targets := g.downEdges[hashcode(v)]
293
294 buf.WriteString(fmt.Sprintf("%s\n", name))
295
296 // Alphabetize dependencies
297 deps := make([]string, 0, targets.Len())
298 for _, target := range targets.List() {
299 deps = append(deps, VertexName(target))
300 }
301 sort.Strings(deps)
302
303 // Write dependencies
304 for _, d := range deps {
305 buf.WriteString(fmt.Sprintf(" %s\n", d))
306 }
307 }
308
309 return buf.String()
310}
311
312func (g *Graph) init() {
313 if g.vertices == nil {
314 g.vertices = new(Set)
315 }
316 if g.edges == nil {
317 g.edges = new(Set)
318 }
319 if g.downEdges == nil {
320 g.downEdges = make(map[interface{}]*Set)
321 }
322 if g.upEdges == nil {
323 g.upEdges = make(map[interface{}]*Set)
324 }
325}
326
327// Dot returns a dot-formatted representation of the Graph.
328func (g *Graph) Dot(opts *DotOpts) []byte {
329 return newMarshalGraph("", g).Dot(opts)
330}
331
332// MarshalJSON returns a JSON representation of the entire Graph.
333func (g *Graph) MarshalJSON() ([]byte, error) {
334 dg := newMarshalGraph("root", g)
335 return json.MarshalIndent(dg, "", " ")
336}
337
338// SetDebugWriter sets the io.Writer where the Graph will record debug
339// information. After this is set, the graph will immediately encode itself to
340// the stream, and continue to record all subsequent operations.
341func (g *Graph) SetDebugWriter(w io.Writer) {
342 g.debug = &encoder{w: w}
343 g.debug.Encode(newMarshalGraph("root", g))
344}
345
346// DebugVertexInfo encodes arbitrary information about a vertex in the graph
347// debug logs.
348func (g *Graph) DebugVertexInfo(v Vertex, info string) {
349 va := newVertexInfo(typeVertexInfo, v, info)
350 g.debug.Encode(va)
351}
352
353// DebugEdgeInfo encodes arbitrary information about an edge in the graph debug
354// logs.
355func (g *Graph) DebugEdgeInfo(e Edge, info string) {
356 ea := newEdgeInfo(typeEdgeInfo, e, info)
357 g.debug.Encode(ea)
358}
359
360// DebugVisitInfo records a visit to a Vertex during a walk operation.
361func (g *Graph) DebugVisitInfo(v Vertex, info string) {
362 vi := newVertexInfo(typeVisitInfo, v, info)
363 g.debug.Encode(vi)
364}
365
366// DebugOperation marks the start of a set of graph transformations in
367// the debug log, and returns a DebugOperationEnd func, which marks the end of
368// the operation in the log. Additional information can be added to the log via
369// the info parameter.
370//
371// The returned func's End method allows this method to be called from a single
372// defer statement:
373//	defer g.DebugOperation("OpName", "operating").End("")
374//
375// The returned function must be called to properly close the logical operation
376// in the logs.
377func (g *Graph) DebugOperation(operation string, info string) DebugOperationEnd {
378 return g.debug.BeginOperation(operation, info)
379}
380
381// VertexName returns the name of a vertex.
382func VertexName(raw Vertex) string {
383 switch v := raw.(type) {
384 case NamedVertex:
385 return v.Name()
386 case fmt.Stringer:
387 return fmt.Sprintf("%s", v)
388 default:
389 return fmt.Sprintf("%v", v)
390 }
391}
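String output is deterministic because both the vertex list and each vertex's dependencies are alphabetized before writing. A small sketch:

	var g dag.Graph
	g.Add("a")
	g.Add("b")
	g.Connect(dag.BasicEdge("a", "b"))

	fmt.Print(g.String())
	// a
	//   b
	// b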
diff --git a/vendor/github.com/hashicorp/terraform/dag/marshal.go b/vendor/github.com/hashicorp/terraform/dag/marshal.go
new file mode 100644
index 0000000..16d5dd6
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/dag/marshal.go
@@ -0,0 +1,462 @@
1package dag
2
3import (
4 "encoding/json"
5 "fmt"
6 "io"
7 "log"
8 "reflect"
9 "sort"
10 "strconv"
11 "sync"
12)
13
14const (
15 typeOperation = "Operation"
16 typeTransform = "Transform"
17 typeWalk = "Walk"
18 typeDepthFirstWalk = "DepthFirstWalk"
19 typeReverseDepthFirstWalk = "ReverseDepthFirstWalk"
20 typeTransitiveReduction = "TransitiveReduction"
21 typeEdgeInfo = "EdgeInfo"
22 typeVertexInfo = "VertexInfo"
23 typeVisitInfo = "VisitInfo"
24)
25
26// the marshal* structs are for serialization of the graph data.
27type marshalGraph struct {
28 // Type is always "Graph", for identification as a top level object in the
29 // JSON stream.
30 Type string
31
32 // Each marshal structure requires a unique ID so that it can be referenced
33 // by other structures.
34 ID string `json:",omitempty"`
35
36 // Human readable name for this graph.
37 Name string `json:",omitempty"`
38
39 // Arbitrary attributes that can be added to the output.
40 Attrs map[string]string `json:",omitempty"`
41
42 // List of graph vertices, sorted by ID.
43 Vertices []*marshalVertex `json:",omitempty"`
44
45 // List of edges, sorted by Source ID.
46 Edges []*marshalEdge `json:",omitempty"`
47
48 // Any number of subgraphs. A subgraph itself is considered a vertex, and
49 // may be referenced by either end of an edge.
50 Subgraphs []*marshalGraph `json:",omitempty"`
51
52 // Any lists of vertices that are included in cycles.
53 Cycles [][]*marshalVertex `json:",omitempty"`
54}
55
56// The add, remove, connect, removeEdge methods mirror the basic Graph
57// manipulations to reconstruct a marshalGraph from a debug log.
58func (g *marshalGraph) add(v *marshalVertex) {
59 g.Vertices = append(g.Vertices, v)
60 sort.Sort(vertices(g.Vertices))
61}
62
63func (g *marshalGraph) remove(v *marshalVertex) {
64 for i, existing := range g.Vertices {
65 if v.ID == existing.ID {
66 g.Vertices = append(g.Vertices[:i], g.Vertices[i+1:]...)
67 return
68 }
69 }
70}
71
72func (g *marshalGraph) connect(e *marshalEdge) {
73 g.Edges = append(g.Edges, e)
74 sort.Sort(edges(g.Edges))
75}
76
77func (g *marshalGraph) removeEdge(e *marshalEdge) {
78 for i, existing := range g.Edges {
79 if e.Source == existing.Source && e.Target == existing.Target {
80 g.Edges = append(g.Edges[:i], g.Edges[i+1:]...)
81 return
82 }
83 }
84}
85
86func (g *marshalGraph) vertexByID(id string) *marshalVertex {
87 for _, v := range g.Vertices {
88 if id == v.ID {
89 return v
90 }
91 }
92 return nil
93}
94
95type marshalVertex struct {
96 // Unique ID, used to reference this vertex from other structures.
97 ID string
98
99 // Human readable name
100 Name string `json:",omitempty"`
101
102 Attrs map[string]string `json:",omitempty"`
103
104 // This is to help transition from the old Dot interfaces. We record if the
105 // node was a GraphNodeDotter here, so we can call it to get attributes.
106 graphNodeDotter GraphNodeDotter
107}
108
109func newMarshalVertex(v Vertex) *marshalVertex {
110 dn, ok := v.(GraphNodeDotter)
111 if !ok {
112 dn = nil
113 }
114
115 return &marshalVertex{
116 ID: marshalVertexID(v),
117 Name: VertexName(v),
118 Attrs: make(map[string]string),
119 graphNodeDotter: dn,
120 }
121}
122
123// vertices is a sort.Interface implementation for sorting vertices by Name
124type vertices []*marshalVertex
125
126func (v vertices) Less(i, j int) bool { return v[i].Name < v[j].Name }
127func (v vertices) Len() int { return len(v) }
128func (v vertices) Swap(i, j int) { v[i], v[j] = v[j], v[i] }
129
130type marshalEdge struct {
131 // Human readable name
132 Name string
133
134 // Source and Target Vertices by ID
135 Source string
136 Target string
137
138 Attrs map[string]string `json:",omitempty"`
139}
140
141func newMarshalEdge(e Edge) *marshalEdge {
142 return &marshalEdge{
143 Name: fmt.Sprintf("%s|%s", VertexName(e.Source()), VertexName(e.Target())),
144 Source: marshalVertexID(e.Source()),
145 Target: marshalVertexID(e.Target()),
146 Attrs: make(map[string]string),
147 }
148}
149
150// edges is a sort.Interface implementation for sorting edges by Name
151type edges []*marshalEdge
152
153func (e edges) Less(i, j int) bool { return e[i].Name < e[j].Name }
154func (e edges) Len() int { return len(e) }
155func (e edges) Swap(i, j int) { e[i], e[j] = e[j], e[i] }
156
157// build a marshalGraph structure from a *Graph
158func newMarshalGraph(name string, g *Graph) *marshalGraph {
159 mg := &marshalGraph{
160 Type: "Graph",
161 Name: name,
162 Attrs: make(map[string]string),
163 }
164
165 for _, v := range g.Vertices() {
166 id := marshalVertexID(v)
167 if sg, ok := marshalSubgrapher(v); ok {
168 smg := newMarshalGraph(VertexName(v), sg)
169 smg.ID = id
170 mg.Subgraphs = append(mg.Subgraphs, smg)
171 }
172
173 mv := newMarshalVertex(v)
174 mg.Vertices = append(mg.Vertices, mv)
175 }
176
177 sort.Sort(vertices(mg.Vertices))
178
179 for _, e := range g.Edges() {
180 mg.Edges = append(mg.Edges, newMarshalEdge(e))
181 }
182
183 sort.Sort(edges(mg.Edges))
184
185 for _, c := range (&AcyclicGraph{*g}).Cycles() {
186 var cycle []*marshalVertex
187 for _, v := range c {
188 mv := newMarshalVertex(v)
189 cycle = append(cycle, mv)
190 }
191 mg.Cycles = append(mg.Cycles, cycle)
192 }
193
194 return mg
195}
196
197// marshalVertexID attempts to return a unique ID for any vertex.
198func marshalVertexID(v Vertex) string {
199 val := reflect.ValueOf(v)
200 switch val.Kind() {
201 case reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr, reflect.Slice, reflect.UnsafePointer:
202 return strconv.Itoa(int(val.Pointer()))
203 case reflect.Interface:
204 return strconv.Itoa(int(val.InterfaceData()[1]))
205 }
206
207 if v, ok := v.(Hashable); ok {
208 h := v.Hashcode()
209 if h, ok := h.(string); ok {
210 return h
211 }
212 }
213
214 // fallback to a name, which we hope is unique.
215 return VertexName(v)
216
217 // we could try harder by attempting to read the arbitrary value from the
218 // interface, but we shouldn't get here from terraform right now.
219}
220
221// check for a Subgrapher, and return the underlying *Graph.
222func marshalSubgrapher(v Vertex) (*Graph, bool) {
223 sg, ok := v.(Subgrapher)
224 if !ok {
225 return nil, false
226 }
227
228 switch g := sg.Subgraph().DirectedGraph().(type) {
229 case *Graph:
230 return g, true
231 case *AcyclicGraph:
232 return &g.Graph, true
233 }
234
235 return nil, false
236}
237
238// The DebugOperationEnd func type provides a way to call an End function via a
239// method call, allowing for the chaining of methods in a defer statement.
240type DebugOperationEnd func(string)
241
242// End calls function e with the info parameter, marking the end of this
243// operation in the logs.
244func (e DebugOperationEnd) End(info string) { e(info) }
245
246// encoder provides methods to write debug data to an io.Writer, and is a noop
247// when no writer is present
248type encoder struct {
249 sync.Mutex
250 w io.Writer
251}
252
253// Encode is analogous to json.Encoder.Encode
254func (e *encoder) Encode(i interface{}) {
255 if e == nil || e.w == nil {
256 return
257 }
258 e.Lock()
259 defer e.Unlock()
260
261 js, err := json.Marshal(i)
262 if err != nil {
263 log.Println("[ERROR] dag:", err)
264 return
265 }
266 js = append(js, '\n')
267
268 _, err = e.w.Write(js)
269 if err != nil {
270 log.Println("[ERROR] dag:", err)
271 return
272 }
273}
274
275func (e *encoder) Add(v Vertex) {
276 e.Encode(marshalTransform{
277 Type: typeTransform,
278 AddVertex: newMarshalVertex(v),
279 })
280}
281
282// Remove records the removal of Vertex v.
283func (e *encoder) Remove(v Vertex) {
284 e.Encode(marshalTransform{
285 Type: typeTransform,
286 RemoveVertex: newMarshalVertex(v),
287 })
288}
289
290func (e *encoder) Connect(edge Edge) {
291 e.Encode(marshalTransform{
292 Type: typeTransform,
293 AddEdge: newMarshalEdge(edge),
294 })
295}
296
297func (e *encoder) RemoveEdge(edge Edge) {
298 e.Encode(marshalTransform{
299 Type: typeTransform,
300 RemoveEdge: newMarshalEdge(edge),
301 })
302}
303
304// BeginOperation marks the start of a set of graph transformations, and
305// returns a DebugOperationEnd func to be called once the operation is complete.
306func (e *encoder) BeginOperation(op string, info string) DebugOperationEnd {
307 if e == nil {
308 return func(string) {}
309 }
310
311 e.Encode(marshalOperation{
312 Type: typeOperation,
313 Begin: op,
314 Info: info,
315 })
316
317 return func(info string) {
318 e.Encode(marshalOperation{
319 Type: typeOperation,
320 End: op,
321 Info: info,
322 })
323 }
324}
325
326// structure for recording graph transformations
327type marshalTransform struct {
328 // Type: "Transform"
329 Type string
330 AddEdge *marshalEdge `json:",omitempty"`
331 RemoveEdge *marshalEdge `json:",omitempty"`
332 AddVertex *marshalVertex `json:",omitempty"`
333 RemoveVertex *marshalVertex `json:",omitempty"`
334}
335
336func (t marshalTransform) Transform(g *marshalGraph) {
337 switch {
338 case t.AddEdge != nil:
339 g.connect(t.AddEdge)
340 case t.RemoveEdge != nil:
341 g.removeEdge(t.RemoveEdge)
342 case t.AddVertex != nil:
343 g.add(t.AddVertex)
344 case t.RemoveVertex != nil:
345 g.remove(t.RemoveVertex)
346 }
347}
348
349// streamDecode allows us to decode any object in the json stream for
350// inspection, then re-decode it into a proper struct if needed.
351type streamDecode struct {
352 Type string
353 Map map[string]interface{}
354 JSON []byte
355}
356
357func (s *streamDecode) UnmarshalJSON(d []byte) error {
358 s.JSON = d
359 err := json.Unmarshal(d, &s.Map)
360 if err != nil {
361 return err
362 }
363
364 if t, ok := s.Map["Type"]; ok {
365 s.Type, _ = t.(string)
366 }
367 return nil
368}
369
370// structure for recording the beginning and end of any multi-step
371// transformations. These are informational, and not required to reproduce the
372// graph state.
373type marshalOperation struct {
374 Type string
375 Begin string `json:",omitempty"`
376 End string `json:",omitempty"`
377 Info string `json:",omitempty"`
378}
379
380// decodeGraph decodes a marshalGraph from an encoded graph stream.
381func decodeGraph(r io.Reader) (*marshalGraph, error) {
382 dec := json.NewDecoder(r)
383
384 // a stream should always start with a graph
385 g := &marshalGraph{}
386
387 err := dec.Decode(g)
388 if err != nil {
389 return nil, err
390 }
391
392 // now replay any operations that occurred on the original graph
393 for dec.More() {
394 s := &streamDecode{}
395 err := dec.Decode(s)
396 if err != nil {
397 return g, err
398 }
399
400 // the only Type we're concerned with here is Transform to complete the
401 // Graph
402 if s.Type != typeTransform {
403 continue
404 }
405
406 t := &marshalTransform{}
407 err = json.Unmarshal(s.JSON, t)
408 if err != nil {
409 return g, err
410 }
411 t.Transform(g)
412 }
413 return g, nil
414}
415
416// marshalVertexInfo allows encoding arbitrary information about a single
417// Vertex in the logs. These are accumulated for informational display while
418// rebuilding the graph.
419type marshalVertexInfo struct {
420 Type string
421 Vertex *marshalVertex
422 Info string
423}
424
425func newVertexInfo(infoType string, v Vertex, info string) *marshalVertexInfo {
426 return &marshalVertexInfo{
427 Type: infoType,
428 Vertex: newMarshalVertex(v),
429 Info: info,
430 }
431}
432
433// marshalEdgeInfo allows encoding arbitrary information about a single
434// Edge in the logs. These are accumulated for informational display while
435// rebuilding the graph.
436type marshalEdgeInfo struct {
437 Type string
438 Edge *marshalEdge
439 Info string
440}
441
442func newEdgeInfo(infoType string, e Edge, info string) *marshalEdgeInfo {
443 return &marshalEdgeInfo{
444 Type: infoType,
445 Edge: newMarshalEdge(e),
446 Info: info,
447 }
448}
449
450// JSON2Dot reads a Graph debug log from an io.Reader, and converts the final
451// graph to dot format.
452//
453// TODO: Allow returning the output at a certain point during decode.
454// Encode extra information from the json log into the Dot.
455func JSON2Dot(r io.Reader) ([]byte, error) {
456 g, err := decodeGraph(r)
457 if err != nil {
458 return nil, err
459 }
460
461 return g.Dot(nil), nil
462}
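The encoder and decoder round-trip through the same stream format: SetDebugWriter emits a snapshot of the graph followed by one Transform record per mutation, and JSON2Dot replays those records to rebuild the final graph. A sketch:

	var buf bytes.Buffer
	var g dag.Graph
	g.SetDebugWriter(&buf) // snapshot now; later operations are recorded

	g.Add("a")
	g.Add("b")
	g.Connect(dag.BasicEdge("a", "b"))

	dot, err := dag.JSON2Dot(&buf)
	if err != nil {
		log.Fatal(err)
	}
	os.Stdout.Write(dot)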
diff --git a/vendor/github.com/hashicorp/terraform/dag/set.go b/vendor/github.com/hashicorp/terraform/dag/set.go
new file mode 100644
index 0000000..3929c9d
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/dag/set.go
@@ -0,0 +1,109 @@
1package dag
2
3import (
4 "sync"
5)
6
7// Set is a set data structure.
8type Set struct {
9 m map[interface{}]interface{}
10 once sync.Once
11}
12
13// Hashable is the interface used by set to get the hash code of a value.
14// If this isn't given, then the value of the item being added to the set
15// itself is used as the comparison value.
16type Hashable interface {
17 Hashcode() interface{}
18}
19
20// hashcode returns the hashcode used for set elements.
21func hashcode(v interface{}) interface{} {
22 if h, ok := v.(Hashable); ok {
23 return h.Hashcode()
24 }
25
26 return v
27}
28
29// Add adds an item to the set
30func (s *Set) Add(v interface{}) {
31 s.once.Do(s.init)
32 s.m[hashcode(v)] = v
33}
34
35// Delete removes an item from the set.
36func (s *Set) Delete(v interface{}) {
37 s.once.Do(s.init)
38 delete(s.m, hashcode(v))
39}
40
41// Include returns true/false of whether a value is in the set.
42func (s *Set) Include(v interface{}) bool {
43 s.once.Do(s.init)
44 _, ok := s.m[hashcode(v)]
45 return ok
46}
47
48// Intersection computes the set intersection with other.
49func (s *Set) Intersection(other *Set) *Set {
50 result := new(Set)
51 if s == nil {
52 return result
53 }
54 if other != nil {
55 for _, v := range s.m {
56 if other.Include(v) {
57 result.Add(v)
58 }
59 }
60 }
61
62 return result
63}
64
65// Difference returns a set with the elements that s has but
66// other doesn't.
67func (s *Set) Difference(other *Set) *Set {
68 result := new(Set)
69 if s != nil {
70 for k, v := range s.m {
71 var ok bool
72 if other != nil {
73 _, ok = other.m[k]
74 }
75 if !ok {
76 result.Add(v)
77 }
78 }
79 }
80
81 return result
82}
83
84// Len is the number of items in the set.
85func (s *Set) Len() int {
86 if s == nil {
87 return 0
88 }
89
90 return len(s.m)
91}
92
93// List returns the list of set elements.
94func (s *Set) List() []interface{} {
95 if s == nil {
96 return nil
97 }
98
99 r := make([]interface{}, 0, len(s.m))
100 for _, v := range s.m {
101 r = append(r, v)
102 }
103
104 return r
105}
106
107func (s *Set) init() {
108 s.m = make(map[interface{}]interface{})
109}
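When elements implement Hashable, distinct values sharing a hashcode collapse to one entry; otherwise the value itself is the map key. A sketch with an illustrative node type:

	type node struct{ id string }

	func (n *node) Hashcode() interface{} { return n.id }

	s := new(dag.Set)
	s.Add(&node{id: "a"})
	s.Add(&node{id: "a"}) // different pointer, same hashcode
	fmt.Println(s.Len())  // 1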
diff --git a/vendor/github.com/hashicorp/terraform/dag/tarjan.go b/vendor/github.com/hashicorp/terraform/dag/tarjan.go
new file mode 100644
index 0000000..9d8b25c
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/dag/tarjan.go
@@ -0,0 +1,107 @@
1package dag
2
3// StronglyConnected returns the list of strongly connected components
4// within the Graph g. This information is primarily used by this package
5// for cycle detection, but strongly connected components have widespread
6// use.
7func StronglyConnected(g *Graph) [][]Vertex {
8 vs := g.Vertices()
9 acct := sccAcct{
10 NextIndex: 1,
11 VertexIndex: make(map[Vertex]int, len(vs)),
12 }
13 for _, v := range vs {
14 // Recurse on any non-visited nodes
15 if acct.VertexIndex[v] == 0 {
16 stronglyConnected(&acct, g, v)
17 }
18 }
19 return acct.SCC
20}
21
22func stronglyConnected(acct *sccAcct, g *Graph, v Vertex) int {
23 // Initial vertex visit
24 index := acct.visit(v)
25 minIdx := index
26
27 for _, raw := range g.DownEdges(v).List() {
28 target := raw.(Vertex)
29 targetIdx := acct.VertexIndex[target]
30
31 // Recurse on successor if not yet visited
32 if targetIdx == 0 {
33 minIdx = min(minIdx, stronglyConnected(acct, g, target))
34 } else if acct.inStack(target) {
35 // Check if the vertex is in the stack
36 minIdx = min(minIdx, targetIdx)
37 }
38 }
39
40 // Pop the strongly connected components off the stack if
41 // this is a root vertex
42 if index == minIdx {
43 var scc []Vertex
44 for {
45 v2 := acct.pop()
46 scc = append(scc, v2)
47 if v2 == v {
48 break
49 }
50 }
51
52 acct.SCC = append(acct.SCC, scc)
53 }
54
55 return minIdx
56}
57
58func min(a, b int) int {
59 if a <= b {
60 return a
61 }
62 return b
63}
64
65// sccAcct is used to pass around accounting information for
66// the strongly connected components algorithm.
67type sccAcct struct {
68 NextIndex int
69 VertexIndex map[Vertex]int
70 Stack []Vertex
71 SCC [][]Vertex
72}
73
74// visit assigns an index and pushes a vertex onto the stack
75func (s *sccAcct) visit(v Vertex) int {
76 idx := s.NextIndex
77 s.VertexIndex[v] = idx
78 s.NextIndex++
79 s.push(v)
80 return idx
81}
82
83// push adds a vertex to the stack
84func (s *sccAcct) push(n Vertex) {
85 s.Stack = append(s.Stack, n)
86}
87
88// pop removes a vertex from the stack
89func (s *sccAcct) pop() Vertex {
90 n := len(s.Stack)
91 if n == 0 {
92 return nil
93 }
94 vertex := s.Stack[n-1]
95 s.Stack = s.Stack[:n-1]
96 return vertex
97}
98
99// inStack checks if a vertex is in the stack
100func (s *sccAcct) inStack(needle Vertex) bool {
101 for _, n := range s.Stack {
102 if n == needle {
103 return true
104 }
105 }
106 return false
107}
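This is the machinery behind AcyclicGraph.Cycles above: any strongly connected component with more than one vertex is a cycle. A sketch:

	var g dag.Graph
	g.Add(1)
	g.Add(2)
	g.Add(3)
	g.Connect(dag.BasicEdge(1, 2))
	g.Connect(dag.BasicEdge(2, 1)) // 1 and 2 form a cycle
	g.Connect(dag.BasicEdge(2, 3))

	for _, scc := range dag.StronglyConnected(&g) {
		if len(scc) > 1 {
			fmt.Println("cycle:", scc)
		}
	}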
diff --git a/vendor/github.com/hashicorp/terraform/dag/walk.go b/vendor/github.com/hashicorp/terraform/dag/walk.go
new file mode 100644
index 0000000..23c87ad
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/dag/walk.go
@@ -0,0 +1,445 @@
1package dag
2
3import (
4 "errors"
5 "fmt"
6 "log"
7 "sync"
8 "time"
9
10 "github.com/hashicorp/go-multierror"
11)
12
13// Walker is used to walk every vertex of a graph in parallel.
14//
15// A vertex will only be walked when the dependencies of that vertex have
16// been walked. If two vertices can be walked at the same time, they will be.
17//
18// Update can be called to update the graph. This can be called even during
19// a walk, changing vertices/edges mid-walk. This should be done carefully.
20// If a vertex is removed but has already been executed, the result of that
21// execution (any error) is still returned by Wait. Changing or re-adding
22// a vertex that has already executed has no effect. Changing edges of
23// a vertex that has already executed has no effect.
24//
25// Non-parallelism can be enforced by introducing a lock in your callback
26// function. However, the goroutine overhead of a walk will remain.
27// Walker will create V*2 goroutines (one for each vertex, and a dependency
28// waiter for each vertex). In general this should be of no concern unless
29// there are a huge number of vertices.
30//
31// The walk is depth first by default. This can be changed with the Reverse
32// option.
33//
34// A single walker is only valid for one graph walk. After the walk is complete
35// you must construct a new walker to walk again. State for the walk is never
36// deleted in case vertices or edges are changed.
37type Walker struct {
38 // Callback is what is called for each vertex
39 Callback WalkFunc
40
41 // Reverse, if true, causes the source of an edge to depend on a target.
42 // When false (default), the target depends on the source.
43 Reverse bool
44
45 // changeLock must be held to modify any of the fields below. Only Update
46 // should modify these fields. Modifying them outside of Update can cause
47 // serious problems.
48 changeLock sync.Mutex
49 vertices Set
50 edges Set
51 vertexMap map[Vertex]*walkerVertex
52
53 // wait is done when all vertices have executed. It may become "undone"
54 // if new vertices are added.
55 wait sync.WaitGroup
56
57 // errMap contains the errors recorded so far for execution. Reading
58 // and writing should hold errLock.
59 errMap map[Vertex]error
60 errLock sync.Mutex
61}
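// Illustrative usage sketch (assumes a populated *AcyclicGraph g; the
// callback body is hypothetical):
//
//	w := &Walker{Callback: func(v Vertex) error {
//		log.Printf("visiting %q", VertexName(v))
//		return nil
//	}}
//	w.Update(g) // register vertices/edges; execution starts immediately
//	if err := w.Wait(); err != nil {
//		log.Printf("walk failed: %s", err)
//	}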
62
63type walkerVertex struct {
64 // These should only be set once on initialization and never written again.
65 // They are not protected by a lock because they don't need to be;
66 // they are write-once.
67
68 // DoneCh is closed when this vertex has completed execution, regardless
69 // of success.
70 //
71 // CancelCh is closed when the vertex should cancel execution. If execution
72 // is already complete (DoneCh is closed), this has no effect. Otherwise,
73 // execution is cancelled as quickly as possible.
74 DoneCh chan struct{}
75 CancelCh chan struct{}
76
77 // Dependency information. Any changes to any of these fields requires
78 // holding DepsLock.
79 //
80 // DepsCh is sent a single value that denotes whether the upstream deps
81 // were successful (no errors). Any value sent means that the upstream
82 // dependencies are complete. No other values will ever be sent again.
83 //
84 // DepsUpdateCh is closed when there is a new DepsCh set.
85 DepsCh chan bool
86 DepsUpdateCh chan struct{}
87 DepsLock sync.Mutex
88
89 // The fields below are not safe to read/write in parallel. This is
90 // enforced by changes only happening in Update. Nothing else should
91 // ever modify them.
92 deps map[Vertex]chan struct{}
93 depsCancelCh chan struct{}
94}
95
96// errWalkUpstream is used in the errMap of a walk to note that an upstream
97// dependency failed so this vertex wasn't run. This is not shown in the final
98// user-returned error.
99var errWalkUpstream = errors.New("upstream dependency failed")
100
101// Wait waits for the completion of the walk and returns any errors
102// (in the form of a multierror) that occurred. Update should be called
103// to populate the walk with vertices and edges prior to calling this.
104//
105// Wait will return as soon as all currently known vertices are complete.
106// If you plan on calling Update with more vertices in the future, you
107// should not call Wait until after this is done.
108func (w *Walker) Wait() error {
109 // Wait for completion
110 w.wait.Wait()
111
112 // Grab the error lock
113 w.errLock.Lock()
114 defer w.errLock.Unlock()
115
116 // Build the error
117 var result error
118 for v, err := range w.errMap {
119 if err != nil && err != errWalkUpstream {
120 result = multierror.Append(result, fmt.Errorf(
121 "%s: %s", VertexName(v), err))
122 }
123 }
124
125 return result
126}
127
128// Update updates the currently executing walk with the given graph.
129// This will perform a diff of the vertices and edges and update the walker.
130// Already completed vertices remain completed (including any errors during
131// their execution).
132//
133// This returns immediately once the walker is updated; it does not wait
134// for completion of the walk.
135//
136// Multiple Updates can be called in parallel. Update can be called at any
137// time during a walk.
138func (w *Walker) Update(g *AcyclicGraph) {
139 var v, e *Set
140 if g != nil {
141 v, e = g.vertices, g.edges
142 }
143
144 // Grab the change lock so no more updates happen but also so that
145 // no new vertices are executed during this time since we may be
146 // removing them.
147 w.changeLock.Lock()
148 defer w.changeLock.Unlock()
149
150 // Initialize fields
151 if w.vertexMap == nil {
152 w.vertexMap = make(map[Vertex]*walkerVertex)
153 }
154
155 // Calculate all our sets
156 newEdges := e.Difference(&w.edges)
157 oldEdges := w.edges.Difference(e)
158 newVerts := v.Difference(&w.vertices)
159 oldVerts := w.vertices.Difference(v)
160
161 // Add the new vertices
162 for _, raw := range newVerts.List() {
163 v := raw.(Vertex)
164
165 // Add to the waitgroup so our walk is not done until everything finishes
166 w.wait.Add(1)
167
168 // Add to our own set so we know about it already
169 log.Printf("[DEBUG] dag/walk: added new vertex: %q", VertexName(v))
170 w.vertices.Add(raw)
171
172 // Initialize the vertex info
173 info := &walkerVertex{
174 DoneCh: make(chan struct{}),
175 CancelCh: make(chan struct{}),
176 deps: make(map[Vertex]chan struct{}),
177 }
178
179 // Add it to the map and kick off the walk
180 w.vertexMap[v] = info
181 }
182
183 // Remove the old vertices
184 for _, raw := range oldVerts.List() {
185 v := raw.(Vertex)
186
187 // Get the vertex info so we can cancel it
188 info, ok := w.vertexMap[v]
189 if !ok {
190 // This vertex for some reason was never in our map. This
191 // shouldn't be possible.
192 continue
193 }
194
195 // Cancel the vertex
196 close(info.CancelCh)
197
198 // Delete it out of the map
199 delete(w.vertexMap, v)
200
201 log.Printf("[DEBUG] dag/walk: removed vertex: %q", VertexName(v))
202 w.vertices.Delete(raw)
203 }
204
205 // Add the new edges
206 var changedDeps Set
207 for _, raw := range newEdges.List() {
208 edge := raw.(Edge)
209 waiter, dep := w.edgeParts(edge)
210
211 // Get the info for the waiter
212 waiterInfo, ok := w.vertexMap[waiter]
213 if !ok {
214 // Vertex doesn't exist... shouldn't be possible but ignore.
215 continue
216 }
217
218 // Get the info for the dep
219 depInfo, ok := w.vertexMap[dep]
220 if !ok {
221 // Vertex doesn't exist... shouldn't be possible but ignore.
222 continue
223 }
224
225 // Add the dependency to our waiter
226 waiterInfo.deps[dep] = depInfo.DoneCh
227
228 // Record that the deps changed for this waiter
229 changedDeps.Add(waiter)
230
231 log.Printf(
232 "[DEBUG] dag/walk: added edge: %q waiting on %q",
233 VertexName(waiter), VertexName(dep))
234 w.edges.Add(raw)
235 }
236
237 // Process removed edges
238 for _, raw := range oldEdges.List() {
239 edge := raw.(Edge)
240 waiter, dep := w.edgeParts(edge)
241
242 // Get the info for the waiter
243 waiterInfo, ok := w.vertexMap[waiter]
244 if !ok {
245 // Vertex doesn't exist... shouldn't be possible but ignore.
246 continue
247 }
248
249 // Delete the dependency from the waiter
250 delete(waiterInfo.deps, dep)
251
252 // Record that the deps changed for this waiter
253 changedDeps.Add(waiter)
254
255 log.Printf(
256 "[DEBUG] dag/walk: removed edge: %q waiting on %q",
257 VertexName(waiter), VertexName(dep))
258 w.edges.Delete(raw)
259 }
260
261 // For each vertex with changed dependencies, we need to kick off
262 // a new waiter and notify the vertex of the changes.
263 for _, raw := range changedDeps.List() {
264 v := raw.(Vertex)
265 info, ok := w.vertexMap[v]
266 if !ok {
267 // Vertex doesn't exist... shouldn't be possible but ignore.
268 continue
269 }
270
271 // Create a new done channel
272 doneCh := make(chan bool, 1)
273
274 // Create the channel we close for cancellation
275 cancelCh := make(chan struct{})
276
277 // Build a new deps copy
278 deps := make(map[Vertex]<-chan struct{})
279 for k, v := range info.deps {
280 deps[k] = v
281 }
282
283 // Update the update channel
284 info.DepsLock.Lock()
285 if info.DepsUpdateCh != nil {
286 close(info.DepsUpdateCh)
287 }
288 info.DepsCh = doneCh
289 info.DepsUpdateCh = make(chan struct{})
290 info.DepsLock.Unlock()
291
292 // Cancel the older waiter
293 if info.depsCancelCh != nil {
294 close(info.depsCancelCh)
295 }
296 info.depsCancelCh = cancelCh
297
298 log.Printf(
299 "[DEBUG] dag/walk: dependencies changed for %q, sending new deps",
300 VertexName(v))
301
302 // Start the waiter
303 go w.waitDeps(v, deps, doneCh, cancelCh)
304 }
305
306 // Start all the new vertices. We do this at the end so that all
307 // the edge waiters and changes are setup above.
308 for _, raw := range newVerts.List() {
309 v := raw.(Vertex)
310 go w.walkVertex(v, w.vertexMap[v])
311 }
312}
313
314// edgeParts returns the waiter and the dependency, in that order.
315// The waiter is waiting on the dependency.
316func (w *Walker) edgeParts(e Edge) (Vertex, Vertex) {
317 if w.Reverse {
318 return e.Source(), e.Target()
319 }
320
321 return e.Target(), e.Source()
322}
323
324// walkVertex walks a single vertex, waiting for any dependencies before
325// executing the callback.
326func (w *Walker) walkVertex(v Vertex, info *walkerVertex) {
327 // When we're done executing, lower the waitgroup count
328 defer w.wait.Done()
329
330 // When we're done, always close our done channel
331 defer close(info.DoneCh)
332
333 // Wait for our dependencies. We create a [closed] deps channel so
334 // that we can immediately fall through to load our actual DepsCh.
335 var depsSuccess bool
336 var depsUpdateCh chan struct{}
337 depsCh := make(chan bool, 1)
338 depsCh <- true
339 close(depsCh)
340 for {
341 select {
342 case <-info.CancelCh:
343 // Cancel
344 return
345
346 case depsSuccess = <-depsCh:
347 // Deps complete! Mark as nil to trigger completion handling.
348 depsCh = nil
349
350 case <-depsUpdateCh:
351 // New deps, reloop
352 }
353
354 // Check if we have updated dependencies. This can happen if the
355 // dependencies were satisfied exactly prior to an Update occurring.
356 // In that case, we'd like to take into account new dependencies
357 // if possible.
358 info.DepsLock.Lock()
359 if info.DepsCh != nil {
360 depsCh = info.DepsCh
361 info.DepsCh = nil
362 }
363 if info.DepsUpdateCh != nil {
364 depsUpdateCh = info.DepsUpdateCh
365 }
366 info.DepsLock.Unlock()
367
368 // If we still have no deps channel set, then we're done!
369 if depsCh == nil {
370 break
371 }
372 }
373
374 // If we passed dependencies, we just want to check once more that
375 // we're not cancelled, since this can happen just as dependencies pass.
376 select {
377 case <-info.CancelCh:
378 // Cancelled during an update while dependencies completed.
379 return
380 default:
381 }
382
383 // Run our callback or note that our upstream failed
384 var err error
385 if depsSuccess {
386 log.Printf("[DEBUG] dag/walk: walking %q", VertexName(v))
387 err = w.Callback(v)
388 } else {
389 log.Printf("[DEBUG] dag/walk: upstream errored, not walking %q", VertexName(v))
390 err = errWalkUpstream
391 }
392
393 // Record the error
394 if err != nil {
395 w.errLock.Lock()
396 defer w.errLock.Unlock()
397
398 if w.errMap == nil {
399 w.errMap = make(map[Vertex]error)
400 }
401 w.errMap[v] = err
402 }
403}
404
405func (w *Walker) waitDeps(
406 v Vertex,
407 deps map[Vertex]<-chan struct{},
408 doneCh chan<- bool,
409 cancelCh <-chan struct{}) {
410 // For each dependency given to us, wait for it to complete
411 for dep, depCh := range deps {
412 DepSatisfied:
413 for {
414 select {
415 case <-depCh:
416 // Dependency satisfied!
417 break DepSatisfied
418
419 case <-cancelCh:
420 // Wait cancelled. Note that we didn't satisfy dependencies
421 // so that anything waiting on us also doesn't run.
422 doneCh <- false
423 return
424
425 case <-time.After(time.Second * 5):
426 log.Printf("[DEBUG] dag/walk: vertex %q, waiting for: %q",
427 VertexName(v), VertexName(dep))
428 }
429 }
430 }
431
432 // Dependencies satisfied! We need to check if any errored
433 w.errLock.Lock()
434 defer w.errLock.Unlock()
435 for dep, _ := range deps {
436 if w.errMap[dep] != nil {
437 // One of our dependencies failed, so return false
438 doneCh <- false
439 return
440 }
441 }
442
443 // All dependencies satisfied and successful
444 doneCh <- true
445}
diff --git a/vendor/github.com/hashicorp/terraform/flatmap/expand.go b/vendor/github.com/hashicorp/terraform/flatmap/expand.go
new file mode 100644
index 0000000..e0b81b6
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/flatmap/expand.go
@@ -0,0 +1,147 @@
1package flatmap
2
3import (
4 "fmt"
5 "sort"
6 "strconv"
7 "strings"
8
9 "github.com/hashicorp/hil"
10)
11
12// Expand takes a map and a key (prefix) and expands that value into
13// a more complex structure. This is the reverse of the Flatten operation.
14func Expand(m map[string]string, key string) interface{} {
15 // If the key is exactly a key in the map, just return it
16 if v, ok := m[key]; ok {
17 if v == "true" {
18 return true
19 } else if v == "false" {
20 return false
21 }
22
23 return v
24 }
25
26 // Check if the key is an array, and if so, expand the array
27 if v, ok := m[key+".#"]; ok {
28 // If the count of the key is unknown, just return the unknown
29 // value itself. This will be detected by Terraform
30 // core later.
31 if v == hil.UnknownValue {
32 return v
33 }
34
35 return expandArray(m, key)
36 }
37
38 // Check if this is a prefix in the map
39 prefix := key + "."
40 for k := range m {
41 if strings.HasPrefix(k, prefix) {
42 return expandMap(m, prefix)
43 }
44 }
45
46 return nil
47}
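// Illustrative example (a sketch with hypothetical keys): given the
// flattened map
//
//	m := map[string]string{"list.#": "2", "list.0": "a", "list.1": "b"}
//
// Expand(m, "list") returns []interface{}{"a", "b"}.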
48
49func expandArray(m map[string]string, prefix string) []interface{} {
50 num, err := strconv.ParseInt(m[prefix+".#"], 0, 0)
51 if err != nil {
52 panic(err)
53 }
54
55 // If the number of elements in this array is 0, then return an
56 // empty slice as there is nothing to expand. Trying to expand it
57 // anyway could lead to crashes as any child maps, arrays or sets
58 // that no longer exist are still shown as empty with a count of 0.
59 if num == 0 {
60 return []interface{}{}
61 }
62
63 // The Schema "Set" type stores its values in an array format, but
64 // using numeric hash values instead of ordinal keys. Take the set
65 // of keys regardless of value, and expand them in numeric order.
66 // See GH-11042 for more details.
67 keySet := map[int]bool{}
68 computed := map[string]bool{}
69 for k := range m {
70 if !strings.HasPrefix(k, prefix+".") {
71 continue
72 }
73
74 key := k[len(prefix)+1:]
75 idx := strings.Index(key, ".")
76 if idx != -1 {
77 key = key[:idx]
78 }
79
80 // skip the count value
81 if key == "#" {
82 continue
83 }
84
85 // strip the computed flag if there is one
86 if strings.HasPrefix(key, "~") {
87 key = key[1:]
88 computed[key] = true
89 }
90
91 k, err := strconv.Atoi(key)
92 if err != nil {
93 panic(err)
94 }
95 keySet[int(k)] = true
96 }
97
98 keysList := make([]int, 0, num)
99 for key := range keySet {
100 keysList = append(keysList, key)
101 }
102 sort.Ints(keysList)
103
104 result := make([]interface{}, num)
105 for i, key := range keysList {
106 keyString := strconv.Itoa(key)
107 if computed[keyString] {
108 keyString = "~" + keyString
109 }
110 result[i] = Expand(m, fmt.Sprintf("%s.%s", prefix, keyString))
111 }
112
113 return result
114}
115
116func expandMap(m map[string]string, prefix string) map[string]interface{} {
117 // Submaps may not have a '%' key, so we can't count on this value being
118// here. If we don't have a count, just proceed as if we have a map.
119 if count, ok := m[prefix+"%"]; ok && count == "0" {
120 return map[string]interface{}{}
121 }
122
123 result := make(map[string]interface{})
124 for k := range m {
125 if !strings.HasPrefix(k, prefix) {
126 continue
127 }
128
129 key := k[len(prefix):]
130 idx := strings.Index(key, ".")
131 if idx != -1 {
132 key = key[:idx]
133 }
134 if _, ok := result[key]; ok {
135 continue
136 }
137
138 // skip the map count value
139 if key == "%" {
140 continue
141 }
142
143 result[key] = Expand(m, k[:len(prefix)+len(key)])
144 }
145
146 return result
147}
diff --git a/vendor/github.com/hashicorp/terraform/flatmap/flatten.go b/vendor/github.com/hashicorp/terraform/flatmap/flatten.go
new file mode 100644
index 0000000..9ff6e42
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/flatmap/flatten.go
@@ -0,0 +1,71 @@
1package flatmap
2
3import (
4 "fmt"
5 "reflect"
6)
7
8// Flatten takes a structure and turns into a flat map[string]string.
9//
10// Within the "thing" parameter, only primitive values are allowed. Structs are
11// not supported. Therefore, it can only be slices, maps, primitives, and
12// any combination of those together.
13//
14// See the tests for examples of what inputs are turned into.
15func Flatten(thing map[string]interface{}) Map {
16 result := make(map[string]string)
17
18 for k, raw := range thing {
19 flatten(result, k, reflect.ValueOf(raw))
20 }
21
22 return Map(result)
23}
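// Illustrative example (a sketch with hypothetical input):
//
//	Flatten(map[string]interface{}{
//		"tags": map[string]interface{}{"env": "prod"},
//		"ids":  []interface{}{1, 2},
//	})
//
// produces the flat map:
//
//	Map{"tags.env": "prod", "ids.#": "2", "ids.0": "1", "ids.1": "2"}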
24
25func flatten(result map[string]string, prefix string, v reflect.Value) {
26 if v.Kind() == reflect.Interface {
27 v = v.Elem()
28 }
29
30 switch v.Kind() {
31 case reflect.Bool:
32 if v.Bool() {
33 result[prefix] = "true"
34 } else {
35 result[prefix] = "false"
36 }
37 case reflect.Int:
38 result[prefix] = fmt.Sprintf("%d", v.Int())
39 case reflect.Map:
40 flattenMap(result, prefix, v)
41 case reflect.Slice:
42 flattenSlice(result, prefix, v)
43 case reflect.String:
44 result[prefix] = v.String()
45 default:
46 panic(fmt.Sprintf("Unknown: %s", v))
47 }
48}
49
50func flattenMap(result map[string]string, prefix string, v reflect.Value) {
51 for _, k := range v.MapKeys() {
52 if k.Kind() == reflect.Interface {
53 k = k.Elem()
54 }
55
56 if k.Kind() != reflect.String {
57 panic(fmt.Sprintf("%s: map key is not string: %s", prefix, k))
58 }
59
60 flatten(result, fmt.Sprintf("%s.%s", prefix, k.String()), v.MapIndex(k))
61 }
62}
63
64func flattenSlice(result map[string]string, prefix string, v reflect.Value) {
65 prefix = prefix + "."
66
67 result[prefix+"#"] = fmt.Sprintf("%d", v.Len())
68 for i := 0; i < v.Len(); i++ {
69 flatten(result, fmt.Sprintf("%s%d", prefix, i), v.Index(i))
70 }
71}
diff --git a/vendor/github.com/hashicorp/terraform/flatmap/map.go b/vendor/github.com/hashicorp/terraform/flatmap/map.go
new file mode 100644
index 0000000..46b72c4
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/flatmap/map.go
@@ -0,0 +1,82 @@
1package flatmap
2
3import (
4 "strings"
5)
6
7// Map is a wrapper around map[string]string that provides some helpers
8// above it that assume the map is in the format that flatmap expects
9// (the result of Flatten).
10//
11// All modifying functions such as Delete are done in-place unless
12// otherwise noted.
13type Map map[string]string
14
15// Contains returns true if the map contains the given key.
16func (m Map) Contains(key string) bool {
17 for _, k := range m.Keys() {
18 if k == key {
19 return true
20 }
21 }
22
23 return false
24}
25
26// Delete deletes a key out of the map with the given prefix.
27func (m Map) Delete(prefix string) {
28 for k, _ := range m {
29 match := k == prefix
30 if !match {
31 if !strings.HasPrefix(k, prefix) {
32 continue
33 }
34
35 if k[len(prefix):len(prefix)+1] != "." {
36 continue
37 }
38 }
39
40 delete(m, k)
41 }
42}
43
44// Keys returns all of the top-level keys in this map
45func (m Map) Keys() []string {
46 ks := make(map[string]struct{})
47 for k, _ := range m {
48 idx := strings.Index(k, ".")
49 if idx == -1 {
50 idx = len(k)
51 }
52
53 ks[k[:idx]] = struct{}{}
54 }
55
56 result := make([]string, 0, len(ks))
57 for k, _ := range ks {
58 result = append(result, k)
59 }
60
61 return result
62}
63
64// Merge merges the contents of the other Map into this one.
65//
66// This merge is smarter than a simple map iteration because it
67// will fully replace arrays and other complex structures that
68// are present in this map with the other map's. For example, if
69// this map has a 3 element "foo" list, and m2 has a 2 element "foo"
70// list, then the result will be that m has a 2 element "foo"
71// list.
72func (m Map) Merge(m2 Map) {
73 for _, prefix := range m2.Keys() {
74 m.Delete(prefix)
75
76 for k, v := range m2 {
77 if strings.HasPrefix(k, prefix) {
78 m[k] = v
79 }
80 }
81 }
82}
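// Illustrative example (a sketch of the documented behavior): if m flattens
// a 3-element "foo" list and m2 flattens a 2-element "foo" list, m.Merge(m2)
// first deletes every "foo"-prefixed key from m and then copies m2's keys,
// leaving m with exactly the 2-element list.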
diff --git a/vendor/github.com/hashicorp/terraform/helper/acctest/acctest.go b/vendor/github.com/hashicorp/terraform/helper/acctest/acctest.go
new file mode 100644
index 0000000..9d31031
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/acctest/acctest.go
@@ -0,0 +1,2 @@
1// Package acctest contains helpers for Terraform acceptance tests.
2package acctest
diff --git a/vendor/github.com/hashicorp/terraform/helper/acctest/random.go b/vendor/github.com/hashicorp/terraform/helper/acctest/random.go
new file mode 100644
index 0000000..3ddc078
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/acctest/random.go
@@ -0,0 +1,93 @@
1package acctest
2
3import (
4 "bufio"
5 "bytes"
6 crand "crypto/rand"
7 "crypto/rsa"
8 "crypto/x509"
9 "encoding/pem"
10 "fmt"
11 "math/rand"
12 "strings"
13 "time"
14
15 "golang.org/x/crypto/ssh"
16)
17
18// Helpers for generating random tidbits for use in identifiers to prevent
19// collisions in acceptance tests.
20
21// RandInt generates a random integer
22func RandInt() int {
23 reseed()
24 return rand.New(rand.NewSource(time.Now().UnixNano())).Int()
25}
26
27// RandomWithPrefix is used to generate a unique name with a prefix, for
28// randomizing names in acceptance tests
29func RandomWithPrefix(name string) string {
30 reseed()
31 return fmt.Sprintf("%s-%d", name, rand.New(rand.NewSource(time.Now().UnixNano())).Int())
32}
33
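// RandIntRange returns a random integer in the half-open interval [min, max)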
34func RandIntRange(min int, max int) int {
35 reseed()
36 source := rand.New(rand.NewSource(time.Now().UnixNano()))
37 rangeMax := max - min
38
39 return min + int(source.Int31n(int32(rangeMax)))
40}
41
42// RandString generates a random alphanumeric string of the length specified
43func RandString(strlen int) string {
44 return RandStringFromCharSet(strlen, CharSetAlphaNum)
45}
46
47// RandStringFromCharSet generates a random string by selecting characters from
48// the charset provided
49func RandStringFromCharSet(strlen int, charSet string) string {
50 reseed()
51 result := make([]byte, strlen)
52 for i := 0; i < strlen; i++ {
53 result[i] = charSet[rand.Intn(len(charSet))]
54 }
55 return string(result)
56}
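// Illustrative usage (a sketch; the name format is hypothetical):
//
//	name := fmt.Sprintf("tf-acc-%s", RandStringFromCharSet(10, CharSetAlpha))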
57
58// RandSSHKeyPair generates a public and private SSH key pair. The public key is
59// returned in OpenSSH format, and the private key is PEM encoded.
60func RandSSHKeyPair(comment string) (string, string, error) {
61 privateKey, err := rsa.GenerateKey(crand.Reader, 1024)
62 if err != nil {
63 return "", "", err
64 }
65
66 var privateKeyBuffer bytes.Buffer
67 privateKeyPEM := &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(privateKey)}
68 if err := pem.Encode(bufio.NewWriter(&privateKeyBuffer), privateKeyPEM); err != nil {
69 return "", "", err
70 }
71
72 publicKey, err := ssh.NewPublicKey(&privateKey.PublicKey)
73 if err != nil {
74 return "", "", err
75 }
76 keyMaterial := strings.TrimSpace(string(ssh.MarshalAuthorizedKey(publicKey)))
77 return fmt.Sprintf("%s %s", keyMaterial, comment), privateKeyBuffer.String(), nil
78}
79
80// Seeds random with current timestamp
81func reseed() {
82 rand.Seed(time.Now().UTC().UnixNano())
83}
84
85const (
86 // CharSetAlphaNum is the alphanumeric character set for use with
87 // RandStringFromCharSet
88 CharSetAlphaNum = "abcdefghijklmnopqrstuvwxyz0123456789"
89
90 // CharSetAlpha is the alphabetical character set for use with
91 // RandStringFromCharSet
92 CharSetAlpha = "abcdefghijklmnopqrstuvwxyz"
93)
diff --git a/vendor/github.com/hashicorp/terraform/helper/acctest/remotetests.go b/vendor/github.com/hashicorp/terraform/helper/acctest/remotetests.go
new file mode 100644
index 0000000..87c60b8
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/acctest/remotetests.go
@@ -0,0 +1,27 @@
1package acctest
2
3import (
4 "net/http"
5 "os"
6 "testing"
7)
8
9// SkipRemoteTestsEnvVar is an environment variable that can be set by a user
10// running the tests in an environment with limited network connectivity. By
11// default, tests requiring internet connectivity make an effort to skip if no
12// internet is available, but in some cases the smoke test will pass even
13// though the test should still be skipped.
14const SkipRemoteTestsEnvVar = "TF_SKIP_REMOTE_TESTS"
15
16// RemoteTestPrecheck is meant to be run by any unit test that requires
17// outbound internet connectivity. The test will be skipped if it's
18// unavailable.
19func RemoteTestPrecheck(t *testing.T) {
20 if os.Getenv(SkipRemoteTestsEnvVar) != "" {
21 t.Skipf("skipping test, %s was set", SkipRemoteTestsEnvVar)
22 }
23
24 if _, err := http.Get("http://google.com"); err != nil {
25 t.Skipf("skipping, internet seems to not be available: %s", err)
26 }
27}
diff --git a/vendor/github.com/hashicorp/terraform/helper/config/decode.go b/vendor/github.com/hashicorp/terraform/helper/config/decode.go
new file mode 100644
index 0000000..f470c9b
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/config/decode.go
@@ -0,0 +1,28 @@
1package config
2
3import (
4 "github.com/mitchellh/mapstructure"
5)
6
7func Decode(target interface{}, raws ...interface{}) (*mapstructure.Metadata, error) {
8 var md mapstructure.Metadata
9 decoderConfig := &mapstructure.DecoderConfig{
10 Metadata: &md,
11 Result: target,
12 WeaklyTypedInput: true,
13 }
14
15 decoder, err := mapstructure.NewDecoder(decoderConfig)
16 if err != nil {
17 return nil, err
18 }
19
20 for _, raw := range raws {
21 err := decoder.Decode(raw)
22 if err != nil {
23 return nil, err
24 }
25 }
26
27 return &md, nil
28}
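// Illustrative usage (a sketch with hypothetical config values; mapstructure
// matches field names case-insensitively by default):
//
//	var cfg struct{ Region string }
//	if _, err := Decode(&cfg, map[string]interface{}{"region": "us-east-1"}); err != nil {
//		// handle the error
//	}
//
// With WeaklyTypedInput set, string values can also decode into numeric or
// boolean fields.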
diff --git a/vendor/github.com/hashicorp/terraform/helper/config/validator.go b/vendor/github.com/hashicorp/terraform/helper/config/validator.go
new file mode 100644
index 0000000..1a6e023
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/config/validator.go
@@ -0,0 +1,214 @@
1package config
2
3import (
4 "fmt"
5 "strconv"
6 "strings"
7
8 "github.com/hashicorp/terraform/flatmap"
9 "github.com/hashicorp/terraform/terraform"
10)
11
12// Validator is a helper that helps you validate the configuration
13// of your resource, resource provider, etc.
14//
15// At the most basic level, set the Required and Optional lists to be
16// specifiers of keys that are required or optional. If a key shows up
17// that isn't in one of these two lists, then an error is generated.
18//
19// The "specifiers" allowed in this is a fairly rich syntax to help
20// describe the format of your configuration:
21//
22// * Basic keys are just strings. For example: "foo" will match the
23// "foo" key.
24//
25// * Nested structure keys can be matched by doing
26// "listener.*.foo". This will verify that there is at least one
27// listener element that has the "foo" key set.
28//
29// * The existence of a nested structure can be checked by simply
30// doing "listener.*" which will verify that there is at least
31// one element in the "listener" structure. This is NOT
32// validating that "listener" is an array. It is validating
33// that it is a nested structure in the configuration.
34//
35type Validator struct {
36 Required []string
37 Optional []string
38}
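// Illustrative usage sketch (the key names and the resourceConfig value are
// hypothetical):
//
//	v := &Validator{
//		Required: []string{"name", "listener.*.port"},
//		Optional: []string{"description"},
//	}
//	warns, errs := v.Validate(resourceConfig)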
39
40func (v *Validator) Validate(
41 c *terraform.ResourceConfig) (ws []string, es []error) {
42 // Flatten the configuration so it is easier to reason about
43 flat := flatmap.Flatten(c.Raw)
44
45 keySet := make(map[string]validatorKey)
46 for i, vs := range [][]string{v.Required, v.Optional} {
47 req := i == 0
48 for _, k := range vs {
49 vk, err := newValidatorKey(k, req)
50 if err != nil {
51 es = append(es, err)
52 continue
53 }
54
55 keySet[k] = vk
56 }
57 }
58
59 purged := make([]string, 0)
60 for _, kv := range keySet {
61 p, w, e := kv.Validate(flat)
62 if len(w) > 0 {
63 ws = append(ws, w...)
64 }
65 if len(e) > 0 {
66 es = append(es, e...)
67 }
68
69 purged = append(purged, p...)
70 }
71
72 // Delete all the keys we processed in order to find
73 // the unknown keys.
74 for _, p := range purged {
75 delete(flat, p)
76 }
77
78 // The rest are unknown
79 for k, _ := range flat {
80 es = append(es, fmt.Errorf("Unknown configuration: %s", k))
81 }
82
83 return
84}
85
86type validatorKey interface {
87 // Validate validates the given configuration and returns viewed keys,
88 // warnings, and errors.
89 Validate(map[string]string) ([]string, []string, []error)
90}
91
92func newValidatorKey(k string, req bool) (validatorKey, error) {
93 var result validatorKey
94
95 parts := strings.Split(k, ".")
96 if len(parts) > 1 && parts[1] == "*" {
97 result = &nestedValidatorKey{
98 Parts: parts,
99 Required: req,
100 }
101 } else {
102 result = &basicValidatorKey{
103 Key: k,
104 Required: req,
105 }
106 }
107
108 return result, nil
109}
110
111// basicValidatorKey validates keys that are basic such as "foo"
112type basicValidatorKey struct {
113 Key string
114 Required bool
115}
116
117func (v *basicValidatorKey) Validate(
118 m map[string]string) ([]string, []string, []error) {
119 for k, _ := range m {
120 // If we have the exact key, it's a match
121 if k == v.Key {
122 return []string{k}, nil, nil
123 }
124 }
125
126 if !v.Required {
127 return nil, nil, nil
128 }
129
130 return nil, nil, []error{fmt.Errorf(
131 "Key not found: %s", v.Key)}
132}
133
134type nestedValidatorKey struct {
135 Parts []string
136 Required bool
137}
138
139func (v *nestedValidatorKey) validate(
140 m map[string]string,
141 prefix string,
142 offset int) ([]string, []string, []error) {
143 if offset >= len(v.Parts) {
144 // We're at the end. Look for a specific key.
145 v2 := &basicValidatorKey{Key: prefix, Required: v.Required}
146 return v2.Validate(m)
147 }
148
149 current := v.Parts[offset]
150
151 // If we're at offset 0, special case to start at the next one.
152 if offset == 0 {
153 return v.validate(m, current, offset+1)
154 }
155
156 // Determine if we're doing a "for all" or a specific key
157 if current != "*" {
158 // We're looking at a specific key, continue on.
159 return v.validate(m, prefix+"."+current, offset+1)
160 }
161
162 // We're doing a "for all", so we loop over.
163 countStr, ok := m[prefix+".#"]
164 if !ok {
165 if !v.Required {
166 // It wasn't required, so it's no problem.
167 return nil, nil, nil
168 }
169
170 return nil, nil, []error{fmt.Errorf(
171 "Key not found: %s", prefix)}
172 }
173
174 count, err := strconv.ParseInt(countStr, 0, 0)
175 if err != nil {
176 // This shouldn't happen if flatmap works properly
177 panic("invalid flatmap array")
178 }
179
180 var e []error
181 var w []string
182 u := make([]string, 1, count+1)
183 u[0] = prefix + ".#"
184 for i := 0; i < int(count); i++ {
185 prefix := fmt.Sprintf("%s.%d", prefix, i)
186
187 // Mark that we saw this specific key
188 u = append(u, prefix)
189
190 // Mark all prefixes of this
191 for k, _ := range m {
192 if !strings.HasPrefix(k, prefix+".") {
193 continue
194 }
195 u = append(u, k)
196 }
197
198 // If we have more parts, then validate deeper
199 if offset+1 < len(v.Parts) {
200 u2, w2, e2 := v.validate(m, prefix, offset+1)
201
202 u = append(u, u2...)
203 w = append(w, w2...)
204 e = append(e, e2...)
205 }
206 }
207
208 return u, w, e
209}
210
211func (v *nestedValidatorKey) Validate(
212 m map[string]string) ([]string, []string, []error) {
213 return v.validate(m, "", 0)
214}
diff --git a/vendor/github.com/hashicorp/terraform/helper/experiment/experiment.go b/vendor/github.com/hashicorp/terraform/helper/experiment/experiment.go
new file mode 100644
index 0000000..18b8837
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/experiment/experiment.go
@@ -0,0 +1,154 @@
1// Package experiment contains helper functions for tracking experimental
2// features throughout Terraform.
3//
4// This package should be used for creating, enabling, querying, and deleting
5// experimental features. By unifying all of that onto a single interface,
6// we can have the Go compiler help us by enforcing every place we touch
7// an experimental feature.
8//
9// To create a new experiment:
10//
11// 1. Add the experiment to the global vars list below, prefixed with X_
12//
13// 2. Add the experiment variable to the All list in the init() function
14//
15// 3. Use it!
16//
17// To remove an experiment:
18//
19// 1. Delete the experiment global var.
20//
21// 2. Try to compile and fix all the places where the var was referenced.
22//
23// To use an experiment:
24//
25// 1. Use Flag() if you want the experiment to be available from the CLI.
26//
27// 2. Use Enabled() to check whether it is enabled.
28//
29// As a general user:
30//
31// 1. The `-Xexperiment-name` flag
32// 2. The `TF_X_<experiment-name>` env var.
33// 3. The `TF_X_FORCE` env var can be set to force an experimental feature
34// without human verification.
35//
36package experiment
37
38import (
39 "flag"
40 "fmt"
41 "os"
42 "strconv"
43 "strings"
44 "sync"
45)
46
47// The experiments that are available are listed below. Any package in
48// Terraform defining an experiment should define the experiments below.
49// By keeping them all within the experiment package we force a single point
50// of definition and use. This allows the compiler to enforce references
51// so it becomes easy to remove the features.
52var (
53 // Shadow graph. This is already on by default. Disabling it will be
54 // allowed for a while in order for it to not block operations.
55 X_shadow = newBasicID("shadow", "SHADOW", false)
56)
57
58// Global variables this package uses because we are a package
59// with global state.
60var (
61 // All is the list of all experiments. Do not modify this.
62 All []ID
63
64 // enabled keeps track of what flags have been enabled
65 enabled map[string]bool
66 enabledLock sync.Mutex
67
68 // Hidden "experiment" that forces all others to be on without verification
69 x_force = newBasicID("force", "FORCE", false)
70)
71
72func init() {
73 // The list of all experiments, update this when an experiment is added.
74 All = []ID{
75 X_shadow,
76 x_force,
77 }
78
79 // Load
80 reload()
81}
82
83// reload resets and reloads the global state. It is called by init and
84// can also be used by tests.
85func reload() {
86 // Initialize
87 enabledLock.Lock()
88 enabled = make(map[string]bool)
89 enabledLock.Unlock()
90
91 // Set defaults and check env vars
92 for _, id := range All {
93 // Get the default value
94 def := id.Default()
95
96 // An env var, if set, overrides the default (any value but "0" enables)
97 key := fmt.Sprintf("TF_X_%s", strings.ToUpper(id.Env()))
98 if v := os.Getenv(key); v != "" {
99 def = v != "0"
100 }
101
102 // Set the default
103 SetEnabled(id, def)
104 }
105}
106
107// Enabled returns whether an experiment has been enabled or not.
108func Enabled(id ID) bool {
109 enabledLock.Lock()
110 defer enabledLock.Unlock()
111 return enabled[id.Flag()]
112}
113
114// SetEnabled sets an experiment to enabled/disabled. Please check with
115// the experiment docs for when calling this actually affects the experiment.
116func SetEnabled(id ID, v bool) {
117 enabledLock.Lock()
118 defer enabledLock.Unlock()
119 enabled[id.Flag()] = v
120}
121
122// Force returns true if the -Xforce or TF_X_FORCE flag is present, which
123// advises users of this package to not verify with the user that they want
124// experimental behavior and to just continue with it.
125func Force() bool {
126 return Enabled(x_force)
127}
128
129// Flag configures the given FlagSet with the flags to configure
130// all active experiments.
131func Flag(fs *flag.FlagSet) {
132 for _, id := range All {
133 desc := id.Flag()
134 key := fmt.Sprintf("X%s", id.Flag())
135 fs.Var(&idValue{X: id}, key, desc)
136 }
137}
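// Illustrative usage (a sketch of wiring experiments to a FlagSet):
//
//	fs := flag.NewFlagSet("terraform", flag.ContinueOnError)
//	experiment.Flag(fs)       // registers -Xshadow, -Xforce, ...
//	_ = fs.Parse(os.Args[1:]) // e.g. -Xshadow=false disables the shadow graph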
138
139// idValue implements flag.Value for setting the enabled/disabled state
140// of an experiment from the CLI.
141type idValue struct {
142 X ID
143}
144
145func (v *idValue) IsBoolFlag() bool { return true }
146func (v *idValue) String() string { return strconv.FormatBool(Enabled(v.X)) }
147func (v *idValue) Set(raw string) error {
148 b, err := strconv.ParseBool(raw)
149 if err == nil {
150 SetEnabled(v.X, b)
151 }
152
153 return err
154}
diff --git a/vendor/github.com/hashicorp/terraform/helper/experiment/id.go b/vendor/github.com/hashicorp/terraform/helper/experiment/id.go
new file mode 100644
index 0000000..8e2f707
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/experiment/id.go
@@ -0,0 +1,34 @@
1package experiment
2
3// ID represents an experimental feature.
4//
5// The global vars defined on this package should be used as ID values.
6// This interface is purposely not implement-able outside of this package
7// so that we can rely on the Go compiler to enforce all experiment references.
8type ID interface {
9 Env() string
10 Flag() string
11 Default() bool
12
13 unexported() // So the ID can't be implemented externally.
14}
15
16// basicID implements ID.
17type basicID struct {
18 EnvValue string
19 FlagValue string
20 DefaultValue bool
21}
22
23func newBasicID(flag, env string, def bool) ID {
24 return &basicID{
25 EnvValue: env,
26 FlagValue: flag,
27 DefaultValue: def,
28 }
29}
30
31func (id *basicID) Env() string { return id.EnvValue }
32func (id *basicID) Flag() string { return id.FlagValue }
33func (id *basicID) Default() bool { return id.DefaultValue }
34func (id *basicID) unexported() {}
diff --git a/vendor/github.com/hashicorp/terraform/helper/hashcode/hashcode.go b/vendor/github.com/hashicorp/terraform/helper/hashcode/hashcode.go
new file mode 100644
index 0000000..64d8263
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/hashcode/hashcode.go
@@ -0,0 +1,22 @@
1package hashcode
2
3import (
4 "hash/crc32"
5)
6
7// String hashes a string to a stable, non-negative hashcode.
8//
9// crc32 returns a uint32, but for our use we need
10// a non-negative integer. Here we cast to an int
11// and negate it if the result is negative.
12func String(s string) int {
13 v := int(crc32.ChecksumIEEE([]byte(s)))
14 if v >= 0 {
15 return v
16 }
17 if -v >= 0 {
18 return -v
19 }
20 // v == MinInt
21 return 0
22}
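// Illustrative note: on 64-bit platforms int is 64 bits wide, so the
// uint32-to-int conversion above is always non-negative; the negation and
// MinInt branches only matter where int is 32 bits.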
diff --git a/vendor/github.com/hashicorp/terraform/helper/hilmapstructure/hilmapstructure.go b/vendor/github.com/hashicorp/terraform/helper/hilmapstructure/hilmapstructure.go
new file mode 100644
index 0000000..67be1df
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/hilmapstructure/hilmapstructure.go
@@ -0,0 +1,41 @@
1package hilmapstructure
2
3import (
4 "fmt"
5 "reflect"
6
7 "github.com/mitchellh/mapstructure"
8)
9
10var hilMapstructureDecodeHookEmptySlice []interface{}
11var hilMapstructureDecodeHookStringSlice []string
12var hilMapstructureDecodeHookEmptyMap map[string]interface{}
13
14// WeakDecode behaves in the same way as mapstructure.WeakDecode but has a
15// DecodeHook which defeats the backward compatibility mode of mapstructure
16// which WeakDecodes []interface{}{} into an empty map[string]interface{}. This
17// allows us to use WeakDecode (desirable), but not fail on empty lists.
18func WeakDecode(m interface{}, rawVal interface{}) error {
19 config := &mapstructure.DecoderConfig{
20 DecodeHook: func(source reflect.Type, target reflect.Type, val interface{}) (interface{}, error) {
21 sliceType := reflect.TypeOf(hilMapstructureDecodeHookEmptySlice)
22 stringSliceType := reflect.TypeOf(hilMapstructureDecodeHookStringSlice)
23 mapType := reflect.TypeOf(hilMapstructureDecodeHookEmptyMap)
24
25 if (source == sliceType || source == stringSliceType) && target == mapType {
26 return nil, fmt.Errorf("Cannot convert a []interface{} into a map[string]interface{}")
27 }
28
29 return val, nil
30 },
31 WeaklyTypedInput: true,
32 Result: rawVal,
33 }
34
35 decoder, err := mapstructure.NewDecoder(config)
36 if err != nil {
37 return err
38 }
39
40 return decoder.Decode(m)
41}
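// Illustrative usage (a sketch with hypothetical input):
//
//	var cfg struct{ Count int }
//	err := WeakDecode(map[string]interface{}{"Count": "3"}, &cfg)
//	// with WeaklyTypedInput, cfg.Count == 3 even though the input was a string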
diff --git a/vendor/github.com/hashicorp/terraform/helper/logging/logging.go b/vendor/github.com/hashicorp/terraform/helper/logging/logging.go
new file mode 100644
index 0000000..433cd77
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/logging/logging.go
@@ -0,0 +1,100 @@
1package logging
2
3import (
4 "io"
5 "io/ioutil"
6 "log"
7 "os"
8 "strings"
9 "syscall"
10
11 "github.com/hashicorp/logutils"
12)
13
14// These are the environment variables that determine if we log, and, if
15// we log, whether the log should go to a file.
16const (
17 EnvLog = "TF_LOG" // Set to a level such as TRACE
18 EnvLogFile = "TF_LOG_PATH" // Set to a file
19)
20
21var validLevels = []logutils.LogLevel{"TRACE", "DEBUG", "INFO", "WARN", "ERROR"}
22
23// LogOutput determines where we should send logs (if anywhere) and the log level.
24func LogOutput() (logOutput io.Writer, err error) {
25 logOutput = ioutil.Discard
26
27 logLevel := LogLevel()
28 if logLevel == "" {
29 return
30 }
31
32 logOutput = os.Stderr
33 if logPath := os.Getenv(EnvLogFile); logPath != "" {
34 var err error
35 logOutput, err = os.OpenFile(logPath, syscall.O_CREAT|syscall.O_RDWR|syscall.O_APPEND, 0666)
36 if err != nil {
37 return nil, err
38 }
39 }
40
41 // This has been the default since the beginning
42 logOutput = &logutils.LevelFilter{
43 Levels: validLevels,
44 MinLevel: logutils.LogLevel(logLevel),
45 Writer: logOutput,
46 }
47
48 return
49}
50
51// SetOutput checks for a log destination with LogOutput, and calls
52// log.SetOutput with the result. If LogOutput returns nil, SetOutput uses
53// ioutil.Discard. Any error from LogOutput is fatal.
54func SetOutput() {
55 out, err := LogOutput()
56 if err != nil {
57 log.Fatal(err)
58 }
59
60 if out == nil {
61 out = ioutil.Discard
62 }
63
64 log.SetOutput(out)
65}
66
67// LogLevel returns the current log level string based on the environment vars
68func LogLevel() string {
69 envLevel := os.Getenv(EnvLog)
70 if envLevel == "" {
71 return ""
72 }
73
74 logLevel := "TRACE"
75 if isValidLogLevel(envLevel) {
76 // allow the following for better UX: info, Info or INFO
77 logLevel = strings.ToUpper(envLevel)
78 } else {
79 log.Printf("[WARN] Invalid log level: %q. Defaulting to level: TRACE. Valid levels are: %+v",
80 envLevel, validLevels)
81 }
82
83 return logLevel
84}
85
86// IsDebugOrHigher returns whether or not the current log level is debug or trace
87func IsDebugOrHigher() bool {
88 level := string(LogLevel())
89 return level == "DEBUG" || level == "TRACE"
90}
91
92func isValidLogLevel(level string) bool {
93 for _, l := range validLevels {
94 if strings.ToUpper(level) == string(l) {
95 return true
96 }
97 }
98
99 return false
100}
diff --git a/vendor/github.com/hashicorp/terraform/helper/logging/transport.go b/vendor/github.com/hashicorp/terraform/helper/logging/transport.go
new file mode 100644
index 0000000..4477924
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/logging/transport.go
@@ -0,0 +1,53 @@
1package logging
2
3import (
4 "log"
5 "net/http"
6 "net/http/httputil"
7)
8
9type transport struct {
10 name string
11 transport http.RoundTripper
12}
13
14func (t *transport) RoundTrip(req *http.Request) (*http.Response, error) {
15 if IsDebugOrHigher() {
16 reqData, err := httputil.DumpRequestOut(req, true)
17 if err == nil {
18 log.Printf("[DEBUG] "+logReqMsg, t.name, string(reqData))
19 } else {
20 log.Printf("[ERROR] %s API Request error: %#v", t.name, err)
21 }
22 }
23
24 resp, err := t.transport.RoundTrip(req)
25 if err != nil {
26 return resp, err
27 }
28
29 if IsDebugOrHigher() {
30 respData, err := httputil.DumpResponse(resp, true)
31 if err == nil {
32 log.Printf("[DEBUG] "+logRespMsg, t.name, string(respData))
33 } else {
34 log.Printf("[ERROR] %s API Response error: %#v", t.name, err)
35 }
36 }
37
38 return resp, nil
39}
40
41func NewTransport(name string, t http.RoundTripper) *transport {
42 return &transport{name, t}
43}
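// Illustrative usage (a sketch; the transport name is arbitrary):
//
//	client := &http.Client{
//		Transport: NewTransport("Example", http.DefaultTransport),
//	}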
44
45const logReqMsg = `%s API Request Details:
46---[ REQUEST ]---------------------------------------
47%s
48-----------------------------------------------------`
49
50const logRespMsg = `%s API Response Details:
51---[ RESPONSE ]--------------------------------------
52%s
53-----------------------------------------------------`
diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/error.go b/vendor/github.com/hashicorp/terraform/helper/resource/error.go
new file mode 100644
index 0000000..7ee2161
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/resource/error.go
@@ -0,0 +1,79 @@
1package resource
2
3import (
4 "fmt"
5 "strings"
6 "time"
7)
8
9type NotFoundError struct {
10 LastError error
11 LastRequest interface{}
12 LastResponse interface{}
13 Message string
14 Retries int
15}
16
17func (e *NotFoundError) Error() string {
18 if e.Message != "" {
19 return e.Message
20 }
21
22 if e.Retries > 0 {
23 return fmt.Sprintf("couldn't find resource (%d retries)", e.Retries)
24 }
25
26 return "couldn't find resource"
27}
28
29// UnexpectedStateError is returned when Refresh returns a state that's neither in Target nor Pending
30type UnexpectedStateError struct {
31 LastError error
32 State string
33 ExpectedState []string
34}
35
36func (e *UnexpectedStateError) Error() string {
37 return fmt.Sprintf(
38 "unexpected state '%s', wanted target '%s'. last error: %s",
39 e.State,
40 strings.Join(e.ExpectedState, ", "),
41 e.LastError,
42 )
43}
44
45// TimeoutError is returned when WaitForState times out
46type TimeoutError struct {
47 LastError error
48 LastState string
49 Timeout time.Duration
50 ExpectedState []string
51}
52
53func (e *TimeoutError) Error() string {
54 expectedState := "resource to be gone"
55 if len(e.ExpectedState) > 0 {
56 expectedState = fmt.Sprintf("state to become '%s'", strings.Join(e.ExpectedState, ", "))
57 }
58
59 extraInfo := make([]string, 0)
60 if e.LastState != "" {
61 extraInfo = append(extraInfo, fmt.Sprintf("last state: '%s'", e.LastState))
62 }
63 if e.Timeout > 0 {
64 extraInfo = append(extraInfo, fmt.Sprintf("timeout: %s", e.Timeout.String()))
65 }
66
67 suffix := ""
68 if len(extraInfo) > 0 {
69 suffix = fmt.Sprintf(" (%s)", strings.Join(extraInfo, ", "))
70 }
71
72 if e.LastError != nil {
73 return fmt.Sprintf("timeout while waiting for %s%s: %s",
74 expectedState, suffix, e.LastError)
75 }
76
77 return fmt.Sprintf("timeout while waiting for %s%s",
78 expectedState, suffix)
79}
diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/id.go b/vendor/github.com/hashicorp/terraform/helper/resource/id.go
new file mode 100644
index 0000000..629582b
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/resource/id.go
@@ -0,0 +1,39 @@
1package resource
2
3import (
4 "crypto/rand"
5 "fmt"
6 "math/big"
7 "sync"
8)
9
10const UniqueIdPrefix = `terraform-`
11
12// idCounter is a randomly seeded monotonic counter for generating ordered
13// unique ids. It uses a big.Int so we can easily increment a long numeric
14// string. The max possible hex value here with 12 random bytes is
15// "01000000000000000000000000", so there's no chance of rollover during
16// operation.
17var idMutex sync.Mutex
18var idCounter = big.NewInt(0).SetBytes(randomBytes(12))
19
20// Helper for a resource to generate a unique identifier w/ default prefix
21func UniqueId() string {
22 return PrefixedUniqueId(UniqueIdPrefix)
23}
24
25// Helper for a resource to generate a unique identifier w/ given prefix
26//
27// After the prefix, the ID consists of an incrementing 26 digit value (to match
28// previous timestamp output).
29func PrefixedUniqueId(prefix string) string {
30 idMutex.Lock()
31 defer idMutex.Unlock()
32 return fmt.Sprintf("%s%026x", prefix, idCounter.Add(idCounter, big.NewInt(1)))
33}
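// Illustrative usage (a sketch; the counter is randomly seeded, so actual
// values differ):
//
//	id := PrefixedUniqueId("web-") // e.g. "web-01a2b3c4d5e6f7a8b9c0d1e2f3"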
34
35func randomBytes(n int) []byte {
36 b := make([]byte, n)
37 rand.Read(b)
38 return b
39}
diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/map.go b/vendor/github.com/hashicorp/terraform/helper/resource/map.go
new file mode 100644
index 0000000..a465136
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/resource/map.go
@@ -0,0 +1,140 @@
1package resource
2
3import (
4 "fmt"
5 "sort"
6
7 "github.com/hashicorp/terraform/terraform"
8)
9
10// Map is a map of resources that are supported, and provides helpers for
11// more easily implementing a ResourceProvider.
12type Map struct {
13 Mapping map[string]Resource
14}
15
16func (m *Map) Validate(
17 t string, c *terraform.ResourceConfig) ([]string, []error) {
18 r, ok := m.Mapping[t]
19 if !ok {
20 return nil, []error{fmt.Errorf("Unknown resource type: %s", t)}
21 }
22
23 // If there is no validator set, then it is valid
24 if r.ConfigValidator == nil {
25 return nil, nil
26 }
27
28 return r.ConfigValidator.Validate(c)
29}
30
31// Apply performs a create or update depending on the diff, and calls
32// the proper function on the matching Resource.
33func (m *Map) Apply(
34 info *terraform.InstanceInfo,
35 s *terraform.InstanceState,
36 d *terraform.InstanceDiff,
37 meta interface{}) (*terraform.InstanceState, error) {
38 r, ok := m.Mapping[info.Type]
39 if !ok {
40 return nil, fmt.Errorf("Unknown resource type: %s", info.Type)
41 }
42
43 if d.Destroy || d.RequiresNew() {
44 if s.ID != "" {
45 // Destroy the resource if it is created
46 err := r.Destroy(s, meta)
47 if err != nil {
48 return s, err
49 }
50
51 s.ID = ""
52 }
53
54 // If we're only destroying, and not creating, then return now.
55 // Otherwise, we continue so that we can create a new resource.
56 if !d.RequiresNew() {
57 return nil, nil
58 }
59 }
60
61 var result *terraform.InstanceState
62 var err error
63 if s.ID == "" {
64 result, err = r.Create(s, d, meta)
65 } else {
66 if r.Update == nil {
67 return s, fmt.Errorf(
68 "Resource type '%s' doesn't support update",
69 info.Type)
70 }
71
72 result, err = r.Update(s, d, meta)
73 }
74 if result != nil {
75 if result.Attributes == nil {
76 result.Attributes = make(map[string]string)
77 }
78
79 result.Attributes["id"] = result.ID
80 }
81
82 return result, err
83}
84
85// Diff performs a diff on the proper resource type.
86func (m *Map) Diff(
87 info *terraform.InstanceInfo,
88 s *terraform.InstanceState,
89 c *terraform.ResourceConfig,
90 meta interface{}) (*terraform.InstanceDiff, error) {
91 r, ok := m.Mapping[info.Type]
92 if !ok {
93 return nil, fmt.Errorf("Unknown resource type: %s", info.Type)
94 }
95
96 return r.Diff(s, c, meta)
97}
98
99// Refresh performs a Refresh on the proper resource type.
100//
101// Refresh on the Resource won't be called if the state represents a
102// non-created resource (ID is blank).
103//
104// An error is returned if the resource isn't registered.
105func (m *Map) Refresh(
106 info *terraform.InstanceInfo,
107 s *terraform.InstanceState,
108 meta interface{}) (*terraform.InstanceState, error) {
109 // If the resource isn't created, don't refresh.
110 if s.ID == "" {
111 return s, nil
112 }
113
114 r, ok := m.Mapping[info.Type]
115 if !ok {
116 return nil, fmt.Errorf("Unknown resource type: %s", info.Type)
117 }
118
119 return r.Refresh(s, meta)
120}
121
122// Resources returns all the resources that are supported by this
123// resource map and can be used to satisfy the Resources method of
124// a ResourceProvider.
125func (m *Map) Resources() []terraform.ResourceType {
126 ks := make([]string, 0, len(m.Mapping))
127 for k, _ := range m.Mapping {
128 ks = append(ks, k)
129 }
130 sort.Strings(ks)
131
132 rs := make([]terraform.ResourceType, 0, len(m.Mapping))
133 for _, k := range ks {
134 rs = append(rs, terraform.ResourceType{
135 Name: k,
136 })
137 }
138
139 return rs
140}
diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/resource.go b/vendor/github.com/hashicorp/terraform/helper/resource/resource.go
new file mode 100644
index 0000000..0d9c831
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/resource/resource.go
@@ -0,0 +1,49 @@
1package resource
2
3import (
4 "github.com/hashicorp/terraform/helper/config"
5 "github.com/hashicorp/terraform/terraform"
6)
7
8type Resource struct {
9 ConfigValidator *config.Validator
10 Create CreateFunc
11 Destroy DestroyFunc
12 Diff DiffFunc
13 Refresh RefreshFunc
14 Update UpdateFunc
15}
16
17// CreateFunc is a function that creates a resource that didn't previously
18// exist.
19type CreateFunc func(
20 *terraform.InstanceState,
21 *terraform.InstanceDiff,
22 interface{}) (*terraform.InstanceState, error)
23
24// DestroyFunc is a function that destroys a resource that previously
25// exists using the state.
26type DestroyFunc func(
27 *terraform.InstanceState,
28 interface{}) error
29
30// DiffFunc is a function that performs a diff of a resource.
31type DiffFunc func(
32 *terraform.InstanceState,
33 *terraform.ResourceConfig,
34 interface{}) (*terraform.InstanceDiff, error)
35
36// RefreshFunc is a function that performs a refresh of a specific type
37// of resource.
38type RefreshFunc func(
39 *terraform.InstanceState,
40 interface{}) (*terraform.InstanceState, error)
41
42// UpdateFunc is a function that is called to update a resource that
43// previously existed. The difference between this and CreateFunc is that
44// the diff is guaranteed to only contain attributes that don't require
45// a new resource.
46type UpdateFunc func(
47 *terraform.InstanceState,
48 *terraform.InstanceDiff,
49 interface{}) (*terraform.InstanceState, error)
diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/state.go b/vendor/github.com/hashicorp/terraform/helper/resource/state.go
new file mode 100644
index 0000000..37c586a
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/resource/state.go
@@ -0,0 +1,259 @@
1package resource
2
3import (
4 "log"
5 "time"
6)
7
8var refreshGracePeriod = 30 * time.Second
9
10// StateRefreshFunc is a function type used for StateChangeConf that is
11// responsible for refreshing the item being watched for a state change.
12//
13// It returns three results. `result` is any object that will be returned
14// as the final object after waiting for state change. This allows you to
15// return the final updated object, for example an EC2 instance after refreshing
16// it.
17//
18// `state` is the latest state of that object. And `err` is any error that
19// may have happened while refreshing the state.
20type StateRefreshFunc func() (result interface{}, state string, err error)
21
22// StateChangeConf is the configuration struct used for `WaitForState`.
23type StateChangeConf struct {
24 Delay time.Duration // Wait this time before starting checks
25 Pending []string // States that are "allowed" and will continue trying
26 Refresh StateRefreshFunc // Refreshes the current state
27 Target []string // Target state
28 Timeout time.Duration // The amount of time to wait before timeout
29 MinTimeout time.Duration // Smallest time to wait before refreshes
30 PollInterval time.Duration // Override MinTimeout/backoff and only poll this often
31 NotFoundChecks int // Number of times to allow not found
32
33 // This is to work around inconsistent APIs
34 ContinuousTargetOccurence int // Number of times the Target state has to occur continuously
35}
36
37// WaitForState watches an object and waits for it to achieve the state
38// specified in the configuration using the specified Refresh() func,
39// waiting the number of seconds specified in the timeout configuration.
40//
41// If the Refresh function returns an error, exit immediately with that error.
42//
43// If the Refresh function returns a state other than the Target state or one
44// listed in Pending, return immediately with an error.
45//
46// If the Timeout is exceeded before reaching the Target state, return an
47// error.
48//
49// Otherwise, return the result of the first call to the Refresh function to
50// reach the target state.
51func (conf *StateChangeConf) WaitForState() (interface{}, error) {
52 log.Printf("[DEBUG] Waiting for state to become: %s", conf.Target)
53
54 notfoundTick := 0
55 targetOccurence := 0
56
57 // Set a default for times to check for not found
58 if conf.NotFoundChecks == 0 {
59 conf.NotFoundChecks = 20
60 }
61
62 if conf.ContinuousTargetOccurence == 0 {
63 conf.ContinuousTargetOccurence = 1
64 }
65
66 type Result struct {
67 Result interface{}
68 State string
69 Error error
70 Done bool
71 }
72
73 // Read every result from the refresh loop, waiting for a positive result.Done.
74 resCh := make(chan Result, 1)
75 // cancellation channel for the refresh loop
76 cancelCh := make(chan struct{})
77
78 result := Result{}
79
80 go func() {
81 defer close(resCh)
82
83 time.Sleep(conf.Delay)
84
85 // start with 0 delay for the first loop
86 var wait time.Duration
87
88 for {
89 // store the last result
90 resCh <- result
91
92 // wait and watch for cancellation
93 select {
94 case <-cancelCh:
95 return
96 case <-time.After(wait):
97 // first round had no wait
98 if wait == 0 {
99 wait = 100 * time.Millisecond
100 }
101 }
102
103 res, currentState, err := conf.Refresh()
104 result = Result{
105 Result: res,
106 State: currentState,
107 Error: err,
108 }
109
110 if err != nil {
111 resCh <- result
112 return
113 }
114
115 // If we're waiting for the absence of a thing, then return
116 if res == nil && len(conf.Target) == 0 {
117 targetOccurence++
118 if conf.ContinuousTargetOccurence == targetOccurence {
119 result.Done = true
120 resCh <- result
121 return
122 }
123 continue
124 }
125
126 if res == nil {
127 // If we didn't find the resource, check if we have been
128 // not finding it for a while, and if so, report an error.
129 notfoundTick++
130 if notfoundTick > conf.NotFoundChecks {
131 result.Error = &NotFoundError{
132 LastError: err,
133 Retries: notfoundTick,
134 }
135 resCh <- result
136 return
137 }
138 } else {
139 // Reset the counter for when a resource isn't found
140 notfoundTick = 0
141 found := false
142
143 for _, allowed := range conf.Target {
144 if currentState == allowed {
145 found = true
146 targetOccurence++
147 if conf.ContinuousTargetOccurence == targetOccurence {
148 result.Done = true
149 resCh <- result
150 return
151 }
152 continue
153 }
154 }
155
156 for _, allowed := range conf.Pending {
157 if currentState == allowed {
158 found = true
159 targetOccurence = 0
160 break
161 }
162 }
163
164 if !found && len(conf.Pending) > 0 {
165 result.Error = &UnexpectedStateError{
166 LastError: err,
167 State: result.State,
168 ExpectedState: conf.Target,
169 }
170 resCh <- result
171 return
172 }
173 }
174
175 // Wait between refreshes using exponential backoff, except when
176 // waiting for the target state to reoccur.
177 if targetOccurence == 0 {
178 wait *= 2
179 }
180
181 // If a poll interval has been specified, choose that interval.
182 // Otherwise bound the default value.
183 if conf.PollInterval > 0 && conf.PollInterval < 180*time.Second {
184 wait = conf.PollInterval
185 } else {
186 if wait < conf.MinTimeout {
187 wait = conf.MinTimeout
188 } else if wait > 10*time.Second {
189 wait = 10 * time.Second
190 }
191 }
192
193 log.Printf("[TRACE] Waiting %s before next try", wait)
194 }
195 }()
196
197 // store the last result from the refresh loop
198 lastResult := Result{}
199
200 timeout := time.After(conf.Timeout)
201 for {
202 select {
203 case r, ok := <-resCh:
204 // channel closed, so return the last result
205 if !ok {
206 return lastResult.Result, lastResult.Error
207 }
208
209 // we reached the intended state
210 if r.Done {
211 return r.Result, r.Error
212 }
213
214 // still waiting, store the last result
215 lastResult = r
216
217 case <-timeout:
218 log.Printf("[WARN] WaitForState timeout after %s", conf.Timeout)
219 log.Printf("[WARN] WaitForState starting %s refresh grace period", refreshGracePeriod)
220
221 // cancel the goroutine and start our grace period timer
222 close(cancelCh)
223 timeout := time.After(refreshGracePeriod)
224
225 // we need a for loop and a label to break on, because we may have
226 // an extra response value to read, but still want to wait for the
227 // channel to close.
228 forSelect:
229 for {
230 select {
231 case r, ok := <-resCh:
232 if r.Done {
233 // the last refresh loop reached the desired state
234 return r.Result, r.Error
235 }
236
237 if !ok {
238 // the goroutine returned
239 break forSelect
240 }
241
242 // target state not reached, save the result for the
243 // TimeoutError and wait for the channel to close
244 lastResult = r
245 case <-timeout:
246 log.Println("[ERROR] WaitForState exceeded refresh grace period")
247 break forSelect
248 }
249 }
250
251 return nil, &TimeoutError{
252 LastError: lastResult.Error,
253 LastState: lastResult.State,
254 Timeout: conf.Timeout,
255 ExpectedState: conf.Target,
256 }
257 }
258 }
259}
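// Editor's illustrative sketch (not part of the vendored source): a typical
// StateChangeConf for waiting on a hypothetical server object; fetchServer,
// serverID, and the state names are assumptions for illustration only.
//
//	conf := &resource.StateChangeConf{
//		Pending:    []string{"pending", "booting"},
//		Target:     []string{"running"},
//		Timeout:    10 * time.Minute,
//		MinTimeout: 3 * time.Second,
//		Refresh: func() (interface{}, string, error) {
//			srv, err := fetchServer(serverID) // hypothetical API call
//			if err != nil {
//				return nil, "", err // any error aborts the wait
//			}
//			return srv, srv.State, nil
//		},
//	}
//	raw, err := conf.WaitForState()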
diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/testing.go b/vendor/github.com/hashicorp/terraform/helper/resource/testing.go
new file mode 100644
index 0000000..04367c5
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/resource/testing.go
@@ -0,0 +1,790 @@
1package resource
2
3import (
4 "fmt"
5 "io"
6 "io/ioutil"
7 "log"
8 "os"
9 "path/filepath"
10 "reflect"
11 "regexp"
12 "strings"
13 "testing"
14
15 "github.com/davecgh/go-spew/spew"
16 "github.com/hashicorp/go-getter"
17 "github.com/hashicorp/go-multierror"
18 "github.com/hashicorp/terraform/config/module"
19 "github.com/hashicorp/terraform/helper/logging"
20 "github.com/hashicorp/terraform/terraform"
21)
22
23const TestEnvVar = "TF_ACC"
24
25// TestProvider can be implemented by any ResourceProvider to provide custom
26// reset functionality at the start of an acceptance test.
27// The helper/schema Provider implements this interface.
28type TestProvider interface {
29 TestReset() error
30}
31
32// TestCheckFunc is the callback type used with acceptance tests to check
33// the state of a resource. The state passed in is the latest state known,
34// or, in the case of a check run after a destroy, the last known state
35// from before the destroy was applied.
36type TestCheckFunc func(*terraform.State) error
37
38// ImportStateCheckFunc is the check function for ImportState tests
39type ImportStateCheckFunc func([]*terraform.InstanceState) error
40
41// TestCase is a single acceptance test case used to test the apply/destroy
42// lifecycle of a resource in a specific configuration.
43//
44// When the destroy plan is executed, the config from the last TestStep
45// is used to plan it.
46type TestCase struct {
47 // IsUnitTest allows a test to run regardless of the TF_ACC
48 // environment variable. This should be used with care - only for
49 // fast tests on local resources (e.g. remote state with a local
50 // backend) but can be used to increase confidence in correct
51 // operation of Terraform without waiting for a full acctest run.
52 IsUnitTest bool
53
54 // PreCheck, if non-nil, will be called before any test steps are
55 // executed. It will only be executed in the case that the steps
56 // would run, so it can be used for some validation before running
57 // acceptance tests, such as verifying that keys are set up.
58 PreCheck func()
59
60 // Providers are the ResourceProviders that will be under test.
61 //
62 // Alternately, ProviderFactories can be specified for the providers
63 // that are valid. This takes priority over Providers.
64 //
65 // The end effect of each is the same: specifying the providers that
66 // are used within the tests.
67 Providers map[string]terraform.ResourceProvider
68 ProviderFactories map[string]terraform.ResourceProviderFactory
69
70 // PreventPostDestroyRefresh can be set to true for cases where data sources
71 // are tested alongside real resources
72 PreventPostDestroyRefresh bool
73
74 // CheckDestroy is called after the resource is finally destroyed
75 // to allow the tester to test that the resource is truly gone.
76 CheckDestroy TestCheckFunc
77
78 // Steps are the apply sequences done within the context of the
79 // same state. Each step can have its own check to verify correctness.
80 Steps []TestStep
81
82 // The settings below control the "ID-only refresh test." This is
83 // an enabled-by-default test that verifies that a resource can be
84 // refreshed with only an ID to result in the same attributes.
85 // This validates completeness of Refresh.
86 //
87 // IDRefreshName is the name of the resource to check. This will
88 // default to the first non-nil primary resource in the state.
89 //
90 // IDRefreshIgnore is a list of configuration keys that will be ignored.
91 IDRefreshName string
92 IDRefreshIgnore []string
93}
94
95// TestStep is a single apply sequence of a test, done within the
96// context of a state.
97//
98// Multiple TestSteps can be sequenced in a Test to allow testing
99// potentially complex update logic. In general, simply create/destroy
100// tests will only need one step.
101type TestStep struct {
102 // ResourceName should be set to the name of the resource
103 // that is being tested. Example: "aws_instance.foo". Various test
104 // modes use this to auto-detect state information.
105 //
106 // This is only required if the test mode settings below say it is
107 // for the mode you're using.
108 ResourceName string
109
110 // PreConfig is called before the Config is applied to perform any per-step
111 // setup that needs to happen. This is called regardless of "test mode"
112 // below.
113 PreConfig func()
114
115 //---------------------------------------------------------------
116 // Test modes. One of the following groups of settings must be
117 // set to determine what the test step will do. Ideally we would've
118 // used Go interfaces here, but there are now hundreds of tests we don't
119 // want to re-type, so instead we just determine which step logic
120 // to run based on what settings below are set.
121 //---------------------------------------------------------------
122
123 //---------------------------------------------------------------
124 // Plan, Apply testing
125 //---------------------------------------------------------------
126
127 // Config is a string of the configuration to give to Terraform. If this
128 // is set, then the TestCase will execute this step with the same logic
129 // as a `terraform apply`.
130 Config string
131
132 // Check is called after the Config is applied. Use this step to
133 // make your own API calls to check the status of things, and to
134 // inspect the format of the ResourceState itself.
135 //
136 // If an error is returned, the test will fail. In this case, a
137 // destroy plan will still be attempted.
138 //
139 // If this is nil, no check is done on this step.
140 Check TestCheckFunc
141
142 // Destroy will create a destroy plan if set to true.
143 Destroy bool
144
145 // ExpectNonEmptyPlan can be set to true for specific types of tests that are
146 // looking to verify that a diff occurs
147 ExpectNonEmptyPlan bool
148
149 // ExpectError allows the construction of test cases that we expect to fail
150 // with an error. The specified regexp must match against the error for the
151 // test to pass.
152 ExpectError *regexp.Regexp
153
154 // PlanOnly can be set to only run `plan` with this configuration, and not
155 // actually apply it. This is useful for ensuring config changes result in
156 // no-op plans
157 PlanOnly bool
158
159 // PreventPostDestroyRefresh can be set to true for cases where data sources
160 // are tested alongside real resources
161 PreventPostDestroyRefresh bool
162
163 //---------------------------------------------------------------
164 // ImportState testing
165 //---------------------------------------------------------------
166
167 // ImportState, if true, will test the functionality of ImportState
168 // by importing the resource with ResourceName (must be set) and the
169 // ID of that resource.
170 ImportState bool
171
172 // ImportStateId is the ID to perform an ImportState operation with.
173 // This is optional. If it isn't set, then the resource ID is automatically
174 // determined by inspecting the state for ResourceName's ID.
175 ImportStateId string
176
177 // ImportStateIdPrefix is the prefix added in front of ImportStateId.
178 // This can be useful in complex import cases, where more than one
179 // attribute needs to be passed on as the Import ID. Mainly in cases
180 // where the ID is not known, and a known prefix needs to be added to
181 // the unset ImportStateId field.
182 ImportStateIdPrefix string
183
184 // ImportStateCheck checks the results of ImportState. It should be
185 // used to verify that the resulting value of ImportState has the
186 // proper resources, IDs, and attributes.
187 ImportStateCheck ImportStateCheckFunc
188
189 // ImportStateVerify, if true, will also check that the state values
190 // that are finally put into the state after import match for all the
191 // IDs returned by the Import.
192 //
193 // ImportStateVerifyIgnore are fields that should not be verified to
194 // be equal. These can be set to ephemeral fields or fields that can't
195 // be refreshed and don't matter.
196 ImportStateVerify bool
197 ImportStateVerifyIgnore []string
198}
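// Editor's illustrative sketch (not part of the vendored source): a minimal
// TestCase with a single config step; the "example" provider name, the config
// string, and the testAccProvider variable are assumptions for illustration.
//
//	func TestAccExampleThing_basic(t *testing.T) {
//		resource.Test(t, resource.TestCase{
//			Providers: map[string]terraform.ResourceProvider{
//				"example": testAccProvider, // hypothetical provider instance
//			},
//			Steps: []resource.TestStep{
//				{
//					Config: `resource "example_thing" "foo" {}`,
//					Check:  resource.TestCheckResourceAttrSet("example_thing.foo", "id"),
//				},
//			},
//		})
//	}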
199
200// Test performs an acceptance test on a resource.
201//
202// Tests are not run unless an environment variable "TF_ACC" is
203// set to some non-empty value. This is to avoid test cases surprising
204// a user by creating real resources.
205//
206// Tests will fail unless the verbose flag (`go test -v`, or explicitly
207// the "-test.v" flag) is set. Because some acceptance tests can take quite
208// a long time, we require the verbose flag so users are able to see progress
209// output.
210func Test(t TestT, c TestCase) {
211 // We only run acceptance tests if an env var is set because they're
212 // slow and generally require some outside configuration. You can opt out
213 // of this by setting IsUnitTest on individual TestCases.
214 if os.Getenv(TestEnvVar) == "" && !c.IsUnitTest {
215 t.Skip(fmt.Sprintf(
216 "Acceptance tests skipped unless env '%s' set",
217 TestEnvVar))
218 return
219 }
220
221 logWriter, err := logging.LogOutput()
222 if err != nil {
223 t.Error(fmt.Errorf("error setting up logging: %s", err))
224 }
225 log.SetOutput(logWriter)
226
227 // We require verbose mode so that the user knows what is going on.
228 if !testTesting && !testing.Verbose() && !c.IsUnitTest {
229 t.Fatal("Acceptance tests must be run with the -v flag on tests")
230 return
231 }
232
233 // Run the PreCheck if we have it
234 if c.PreCheck != nil {
235 c.PreCheck()
236 }
237
238 ctxProviders, err := testProviderFactories(c)
239 if err != nil {
240 t.Fatal(err)
241 }
242 opts := terraform.ContextOpts{Providers: ctxProviders}
243
244 // A single state variable to track the lifecycle, starting with no state
245 var state *terraform.State
246
247 // Go through each step and run it
248 var idRefreshCheck *terraform.ResourceState
249 idRefresh := c.IDRefreshName != ""
250 errored := false
251 for i, step := range c.Steps {
252 var err error
253 log.Printf("[WARN] Test: Executing step %d", i)
254
255 // Determine the test mode to execute
256 if step.Config != "" {
257 state, err = testStepConfig(opts, state, step)
258 } else if step.ImportState {
259 state, err = testStepImportState(opts, state, step)
260 } else {
261 err = fmt.Errorf(
262 "unknown test mode for step. Please see TestStep docs\n\n%#v",
263 step)
264 }
265
266 // If there was an error, exit
267 if err != nil {
268 // Perhaps we expected an error? Check if it matches
269 if step.ExpectError != nil {
270 if !step.ExpectError.MatchString(err.Error()) {
271 errored = true
272 t.Error(fmt.Sprintf(
273 "Step %d, expected error:\n\n%s\n\nTo match:\n\n%s\n\n",
274 i, err, step.ExpectError))
275 break
276 }
277 } else {
278 errored = true
279 t.Error(fmt.Sprintf(
280 "Step %d error: %s", i, err))
281 break
282 }
283 }
284
285 // If we've never checked an id-only refresh and our state isn't
286 // empty, find the first resource and test it.
287 if idRefresh && idRefreshCheck == nil && !state.Empty() {
288 // Find the first non-nil resource in the state
289 for _, m := range state.Modules {
290 if len(m.Resources) > 0 {
291 if v, ok := m.Resources[c.IDRefreshName]; ok {
292 idRefreshCheck = v
293 }
294
295 break
296 }
297 }
298
299 // If we have an instance to check for refreshes, do it
300 // immediately. We do it in the middle of another test
301 // because it shouldn't affect the overall state (refresh
302 // is read-only semantically) and we want to fail early if
303 // this fails. If refresh isn't read-only, then this will have
304 // caught a different bug.
305 if idRefreshCheck != nil {
306 log.Printf(
307 "[WARN] Test: Running ID-only refresh check on %s",
308 idRefreshCheck.Primary.ID)
309 if err := testIDOnlyRefresh(c, opts, step, idRefreshCheck); err != nil {
310 log.Printf("[ERROR] Test: ID-only test failed: %s", err)
311 t.Error(fmt.Sprintf(
312 "[ERROR] Test: ID-only test failed: %s", err))
313 break
314 }
315 }
316 }
317 }
318
319 // If we never checked an id-only refresh, it is a failure.
320 if idRefresh {
321 if !errored && len(c.Steps) > 0 && idRefreshCheck == nil {
322 t.Error("ID-only refresh check never ran.")
323 }
324 }
325
326 // If we have a state, then run the destroy
327 if state != nil {
328 lastStep := c.Steps[len(c.Steps)-1]
329 destroyStep := TestStep{
330 Config: lastStep.Config,
331 Check: c.CheckDestroy,
332 Destroy: true,
333 PreventPostDestroyRefresh: c.PreventPostDestroyRefresh,
334 }
335
336 log.Printf("[WARN] Test: Executing destroy step")
337 state, err := testStep(opts, state, destroyStep)
338 if err != nil {
339 t.Error(fmt.Sprintf(
340 "Error destroying resource! WARNING: Dangling resources\n"+
341 "may exist. The full state and error is shown below.\n\n"+
342 "Error: %s\n\nState: %s",
343 err,
344 state))
345 }
346 } else {
347 log.Printf("[WARN] Skipping destroy test since there is no state.")
348 }
349}
350
351// testProviderFactories is a helper to build the ResourceProviderFactory map
352// with pre-instantiated ResourceProviders, so that we can reset them for the
353// test, while only calling the factory function once.
354// Any errors are stored so that they can be returned by the factory in
355// terraform to match non-test behavior.
356func testProviderFactories(c TestCase) (map[string]terraform.ResourceProviderFactory, error) {
357 ctxProviders := c.ProviderFactories
358 if ctxProviders == nil {
359 ctxProviders = make(map[string]terraform.ResourceProviderFactory)
360 }
361 // add any fixed providers
362 for k, p := range c.Providers {
363 ctxProviders[k] = terraform.ResourceProviderFactoryFixed(p)
364 }
365
366 // reset the providers if needed
367 for k, pf := range ctxProviders {
368 // errors instantiating the provider are returned immediately, since
369 // there is no provider instance to reset in that case
370 p, err := pf()
371 if err != nil {
372 return nil, err
373 }
374 if p, ok := p.(TestProvider); ok {
375 err := p.TestReset()
376 if err != nil {
377 return nil, fmt.Errorf("[ERROR] failed to reset provider %q: %s", k, err)
378 }
379 }
380 }
381
382 return ctxProviders, nil
383}
384
385// UnitTest is a helper to force the acceptance testing harness to run in the
386// normal unit test suite. This should only be used for resources that don't
387// have any external dependencies.
388func UnitTest(t TestT, c TestCase) {
389 c.IsUnitTest = true
390 Test(t, c)
391}
392
393func testIDOnlyRefresh(c TestCase, opts terraform.ContextOpts, step TestStep, r *terraform.ResourceState) error {
394 // TODO: We guard by this right now so master doesn't explode. We
395 // need to remove this eventually to make this part of the normal tests.
396 if os.Getenv("TF_ACC_IDONLY") == "" {
397 return nil
398 }
399
400 name := fmt.Sprintf("%s.foo", r.Type)
401
402 // Build the state. The state is just the resource with an ID. There
403 // are no attributes. We only set what is needed to perform a refresh.
404 state := terraform.NewState()
405 state.RootModule().Resources[name] = &terraform.ResourceState{
406 Type: r.Type,
407 Primary: &terraform.InstanceState{
408 ID: r.Primary.ID,
409 },
410 }
411
412 // Create the config module. We use the full config because Refresh
413 // doesn't have access to it and we may need things like provider
414 // configurations. The initial implementation of id-only checks used
415 // an empty config module, but that caused the aforementioned problems.
416 mod, err := testModule(opts, step)
417 if err != nil {
418 return err
419 }
420
421 // Initialize the context
422 opts.Module = mod
423 opts.State = state
424 ctx, err := terraform.NewContext(&opts)
425 if err != nil {
426 return err
427 }
428 if ws, es := ctx.Validate(); len(ws) > 0 || len(es) > 0 {
429 if len(es) > 0 {
430 estrs := make([]string, len(es))
431 for i, e := range es {
432 estrs[i] = e.Error()
433 }
434 return fmt.Errorf(
435 "Configuration is invalid.\n\nWarnings: %#v\n\nErrors: %#v",
436 ws, estrs)
437 }
438
439 log.Printf("[WARN] Config warnings: %#v", ws)
440 }
441
442 // Refresh!
443 state, err = ctx.Refresh()
444 if err != nil {
445 return fmt.Errorf("Error refreshing: %s", err)
446 }
447
448 // Verify attribute equivalence.
449 actualR := state.RootModule().Resources[name]
450 if actualR == nil {
451 return fmt.Errorf("Resource gone!")
452 }
453 if actualR.Primary == nil {
454 return fmt.Errorf("Resource has no primary instance")
455 }
456 actual := actualR.Primary.Attributes
457 expected := r.Primary.Attributes
458 // Remove fields we're ignoring
459 for _, v := range c.IDRefreshIgnore {
460 for k := range actual {
461 if strings.HasPrefix(k, v) {
462 delete(actual, k)
463 }
464 }
465 for k := range expected {
466 if strings.HasPrefix(k, v) {
467 delete(expected, k)
468 }
469 }
470 }
471
472 if !reflect.DeepEqual(actual, expected) {
473 // Determine only the different attributes
474 for k, v := range expected {
475 if av, ok := actual[k]; ok && v == av {
476 delete(expected, k)
477 delete(actual, k)
478 }
479 }
480
481 spewConf := spew.NewDefaultConfig()
482 spewConf.SortKeys = true
483 return fmt.Errorf(
484 "Attributes not equivalent. Difference is shown below. Top is actual, bottom is expected."+
485 "\n\n%s\n\n%s",
486 spewConf.Sdump(actual), spewConf.Sdump(expected))
487 }
488
489 return nil
490}
491
492func testModule(
493 opts terraform.ContextOpts,
494 step TestStep) (*module.Tree, error) {
495 if step.PreConfig != nil {
496 step.PreConfig()
497 }
498
499 cfgPath, err := ioutil.TempDir("", "tf-test")
500 if err != nil {
501 return nil, fmt.Errorf(
502 "Error creating temporary directory for config: %s", err)
503 }
504 defer os.RemoveAll(cfgPath)
505
506 // Write the configuration
507 cfgF, err := os.Create(filepath.Join(cfgPath, "main.tf"))
508 if err != nil {
509 return nil, fmt.Errorf(
510 "Error creating temporary file for config: %s", err)
511 }
512
513 _, err = io.Copy(cfgF, strings.NewReader(step.Config))
514 cfgF.Close()
515 if err != nil {
516 return nil, fmt.Errorf(
517 "Error creating temporary file for config: %s", err)
518 }
519
520 // Parse the configuration
521 mod, err := module.NewTreeModule("", cfgPath)
522 if err != nil {
523 return nil, fmt.Errorf(
524 "Error loading configuration: %s", err)
525 }
526
527 // Load the modules
528 modStorage := &getter.FolderStorage{
529 StorageDir: filepath.Join(cfgPath, ".tfmodules"),
530 }
531 err = mod.Load(modStorage, module.GetModeGet)
532 if err != nil {
533 return nil, fmt.Errorf("Error downloading modules: %s", err)
534 }
535
536 return mod, nil
537}
538
539func testResource(c TestStep, state *terraform.State) (*terraform.ResourceState, error) {
540 if c.ResourceName == "" {
541 return nil, fmt.Errorf("ResourceName must be set in TestStep")
542 }
543
544 for _, m := range state.Modules {
545 if len(m.Resources) > 0 {
546 if v, ok := m.Resources[c.ResourceName]; ok {
547 return v, nil
548 }
549 }
550 }
551
552 return nil, fmt.Errorf(
553 "Resource specified by ResourceName couldn't be found: %s", c.ResourceName)
554}
555
556// ComposeTestCheckFunc lets you compose multiple TestCheckFuncs into
557// a single TestCheckFunc.
558//
559// As a user testing their provider, this lets you decompose your checks
560// into smaller pieces more easily.
561func ComposeTestCheckFunc(fs ...TestCheckFunc) TestCheckFunc {
562 return func(s *terraform.State) error {
563 for i, f := range fs {
564 if err := f(s); err != nil {
565 return fmt.Errorf("Check %d/%d error: %s", i+1, len(fs), err)
566 }
567 }
568
569 return nil
570 }
571}
572
573// ComposeAggregateTestCheckFunc lets you compose multiple TestCheckFuncs into
574// a single TestCheckFunc.
575//
576// As a user testing their provider, this lets you decompose your checks
577// into smaller pieces more easily.
578//
579// Unlike ComposeTestCheckFunc, ComposeAggregateTestCheckFunc runs _all_ of the
580// TestCheckFuncs and aggregates failures.
581func ComposeAggregateTestCheckFunc(fs ...TestCheckFunc) TestCheckFunc {
582 return func(s *terraform.State) error {
583 var result *multierror.Error
584
585 for i, f := range fs {
586 if err := f(s); err != nil {
587 result = multierror.Append(result, fmt.Errorf("Check %d/%d error: %s", i+1, len(fs), err))
588 }
589 }
590
591 return result.ErrorOrNil()
592 }
593}
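// Editor's illustrative sketch (not part of the vendored source): composing
// several checks into one TestCheckFunc for a TestStep's Check field; the
// resource address and attribute values are assumptions for illustration.
//
//	check := resource.ComposeAggregateTestCheckFunc(
//		resource.TestCheckResourceAttr("example_thing.foo", "name", "foo"),
//		resource.TestCheckResourceAttrSet("example_thing.foo", "id"),
//	)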
594
595// TestCheckResourceAttrSet is a TestCheckFunc which ensures a value
596// exists in state for the given name/key combination. It is useful when
597// testing that computed values were set, when it is not possible to
598// know ahead of time what the values will be.
599func TestCheckResourceAttrSet(name, key string) TestCheckFunc {
600 return func(s *terraform.State) error {
601 is, err := primaryInstanceState(s, name)
602 if err != nil {
603 return err
604 }
605
606 if val, ok := is.Attributes[key]; ok && val != "" {
607 return nil
608 }
609
610 return fmt.Errorf("%s: Attribute '%s' expected to be set", name, key)
611 }
612}
613
614// TestCheckResourceAttr is a TestCheckFunc which validates
615// the value in state for the given name/key combination.
616func TestCheckResourceAttr(name, key, value string) TestCheckFunc {
617 return func(s *terraform.State) error {
618 is, err := primaryInstanceState(s, name)
619 if err != nil {
620 return err
621 }
622
623 if v, ok := is.Attributes[key]; !ok || v != value {
624 if !ok {
625 return fmt.Errorf("%s: Attribute '%s' not found", name, key)
626 }
627
628 return fmt.Errorf(
629 "%s: Attribute '%s' expected %#v, got %#v",
630 name,
631 key,
632 value,
633 v)
634 }
635
636 return nil
637 }
638}
639
640// TestCheckNoResourceAttr is a TestCheckFunc which ensures that
641// NO value exists in state for the given name/key combination.
642func TestCheckNoResourceAttr(name, key string) TestCheckFunc {
643 return func(s *terraform.State) error {
644 is, err := primaryInstanceState(s, name)
645 if err != nil {
646 return err
647 }
648
649 if _, ok := is.Attributes[key]; ok {
650 return fmt.Errorf("%s: Attribute '%s' found when not expected", name, key)
651 }
652
653 return nil
654 }
655}
656
657// TestMatchResourceAttr is a TestCheckFunc which checks that the value
658// in state for the given name/key combination matches the given regex.
659func TestMatchResourceAttr(name, key string, r *regexp.Regexp) TestCheckFunc {
660 return func(s *terraform.State) error {
661 is, err := primaryInstanceState(s, name)
662 if err != nil {
663 return err
664 }
665
666 if !r.MatchString(is.Attributes[key]) {
667 return fmt.Errorf(
668 "%s: Attribute '%s' didn't match %q, got %#v",
669 name,
670 key,
671 r.String(),
672 is.Attributes[key])
673 }
674
675 return nil
676 }
677}
678
679// TestCheckResourceAttrPtr is like TestCheckResourceAttr except the
680// value is a pointer so that it can be updated while the test is running.
681// It will only be dereferenced at the point this step is run.
682func TestCheckResourceAttrPtr(name string, key string, value *string) TestCheckFunc {
683 return func(s *terraform.State) error {
684 return TestCheckResourceAttr(name, key, *value)(s)
685 }
686}
687
688// TestCheckResourceAttrPair is a TestCheckFunc which validates that the values
689// in state for a pair of name/key combinations are equal.
690func TestCheckResourceAttrPair(nameFirst, keyFirst, nameSecond, keySecond string) TestCheckFunc {
691 return func(s *terraform.State) error {
692 isFirst, err := primaryInstanceState(s, nameFirst)
693 if err != nil {
694 return err
695 }
696 vFirst, ok := isFirst.Attributes[keyFirst]
697 if !ok {
698 return fmt.Errorf("%s: Attribute '%s' not found", nameFirst, keyFirst)
699 }
700
701 isSecond, err := primaryInstanceState(s, nameSecond)
702 if err != nil {
703 return err
704 }
705 vSecond, ok := isSecond.Attributes[keySecond]
706 if !ok {
707 return fmt.Errorf("%s: Attribute '%s' not found", nameSecond, keySecond)
708 }
709
710 if vFirst != vSecond {
711 return fmt.Errorf(
712 "%s: Attribute '%s' expected %#v, got %#v",
713 nameFirst,
714 keyFirst,
715 vSecond,
716 vFirst)
717 }
718
719 return nil
720 }
721}
722
723// TestCheckOutput checks an output in the Terraform configuration
724func TestCheckOutput(name, value string) TestCheckFunc {
725 return func(s *terraform.State) error {
726 ms := s.RootModule()
727 rs, ok := ms.Outputs[name]
728 if !ok {
729 return fmt.Errorf("Not found: %s", name)
730 }
731
732 if rs.Value != value {
733 return fmt.Errorf(
734 "Output '%s': expected %#v, got %#v",
735 name,
736 value,
737 rs)
738 }
739
740 return nil
741 }
742}
743
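// TestMatchOutput is a TestCheckFunc which checks that the value of the
// named output matches the given regex.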
744func TestMatchOutput(name string, r *regexp.Regexp) TestCheckFunc {
745 return func(s *terraform.State) error {
746 ms := s.RootModule()
747 rs, ok := ms.Outputs[name]
748 if !ok {
749 return fmt.Errorf("Not found: %s", name)
750 }
751
752 if !r.MatchString(rs.Value.(string)) {
753 return fmt.Errorf(
754 "Output '%s': %#v didn't match %q",
755 name,
756 rs,
757 r.String())
758 }
759
760 return nil
761 }
762}
763
764// TestT is the interface used to handle the test lifecycle of a test.
765//
766// Users should just use a *testing.T object, which implements this.
767type TestT interface {
768 Error(args ...interface{})
769 Fatal(args ...interface{})
770 Skip(args ...interface{})
771}
772
773// This is set to true by unit tests to alter some behavior
774var testTesting = false
775
776// primaryInstanceState returns the primary instance state for the given resource name.
777func primaryInstanceState(s *terraform.State, name string) (*terraform.InstanceState, error) {
778 ms := s.RootModule()
779 rs, ok := ms.Resources[name]
780 if !ok {
781 return nil, fmt.Errorf("Not found: %s", name)
782 }
783
784 is := rs.Primary
785 if is == nil {
786 return nil, fmt.Errorf("No primary instance: %s", name)
787 }
788
789 return is, nil
790}
diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/testing_config.go b/vendor/github.com/hashicorp/terraform/helper/resource/testing_config.go
new file mode 100644
index 0000000..537a11c
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/resource/testing_config.go
@@ -0,0 +1,160 @@
1package resource
2
3import (
4 "fmt"
5 "log"
6 "strings"
7
8 "github.com/hashicorp/terraform/terraform"
9)
10
11// testStepConfig runs a config-mode test step
12func testStepConfig(
13 opts terraform.ContextOpts,
14 state *terraform.State,
15 step TestStep) (*terraform.State, error) {
16 return testStep(opts, state, step)
17}
18
19func testStep(
20 opts terraform.ContextOpts,
21 state *terraform.State,
22 step TestStep) (*terraform.State, error) {
23 mod, err := testModule(opts, step)
24 if err != nil {
25 return state, err
26 }
27
28 // Build the context
29 opts.Module = mod
30 opts.State = state
31 opts.Destroy = step.Destroy
32 ctx, err := terraform.NewContext(&opts)
33 if err != nil {
34 return state, fmt.Errorf("Error initializing context: %s", err)
35 }
36 if ws, es := ctx.Validate(); len(ws) > 0 || len(es) > 0 {
37 if len(es) > 0 {
38 estrs := make([]string, len(es))
39 for i, e := range es {
40 estrs[i] = e.Error()
41 }
42 return state, fmt.Errorf(
43 "Configuration is invalid.\n\nWarnings: %#v\n\nErrors: %#v",
44 ws, estrs)
45 }
46 log.Printf("[WARN] Config warnings: %#v", ws)
47 }
48
49 // Refresh!
50 state, err = ctx.Refresh()
51 if err != nil {
52 return state, fmt.Errorf(
53 "Error refreshing: %s", err)
54 }
55
56 // If this step is a PlanOnly step, skip over this first Plan and subsequent
57 // Apply, and use the follow-up Plan that checks for perpetual diffs.
58 if !step.PlanOnly {
59 // Plan!
60 if p, err := ctx.Plan(); err != nil {
61 return state, fmt.Errorf(
62 "Error planning: %s", err)
63 } else {
64 log.Printf("[WARN] Test: Step plan: %s", p)
65 }
66
67 // We need to keep a copy of the state prior to destroying
68 // such that destroy steps can verify their behaviour in the check
69 // function
70 stateBeforeApplication := state.DeepCopy()
71
72 // Apply!
73 state, err = ctx.Apply()
74 if err != nil {
75 return state, fmt.Errorf("Error applying: %s", err)
76 }
77
78 // Check! Excitement!
79 if step.Check != nil {
80 if step.Destroy {
81 if err := step.Check(stateBeforeApplication); err != nil {
82 return state, fmt.Errorf("Check failed: %s", err)
83 }
84 } else {
85 if err := step.Check(state); err != nil {
86 return state, fmt.Errorf("Check failed: %s", err)
87 }
88 }
89 }
90 }
91
92 // Now verify that the plan is empty and we don't have a perpetual diff issue.
93 // We do this with TWO plans. One without a refresh.
94 var p *terraform.Plan
95 if p, err = ctx.Plan(); err != nil {
96 return state, fmt.Errorf("Error on follow-up plan: %s", err)
97 }
98 if p.Diff != nil && !p.Diff.Empty() {
99 if step.ExpectNonEmptyPlan {
100 log.Printf("[INFO] Got non-empty plan, as expected:\n\n%s", p)
101 } else {
102 return state, fmt.Errorf(
103 "After applying this step, the plan was not empty:\n\n%s", p)
104 }
105 }
106
107 // And another after a Refresh.
108 if !step.Destroy || (step.Destroy && !step.PreventPostDestroyRefresh) {
109 state, err = ctx.Refresh()
110 if err != nil {
111 return state, fmt.Errorf(
112 "Error on follow-up refresh: %s", err)
113 }
114 }
115 if p, err = ctx.Plan(); err != nil {
116 return state, fmt.Errorf("Error on second follow-up plan: %s", err)
117 }
118 empty := p.Diff == nil || p.Diff.Empty()
119
120 // Data resources are tricky because they legitimately get instantiated
121 // during refresh so that they will be already populated during the
122 // plan walk. Because of this, if we have any data resources in the
123 // config we'll end up wanting to destroy them again here. This is
124 // acceptable and expected, and we'll treat it as "empty" for the
125 // sake of this testing.
126 if step.Destroy {
127 empty = true
128
129 for _, moduleDiff := range p.Diff.Modules {
130 for k, instanceDiff := range moduleDiff.Resources {
131 if !strings.HasPrefix(k, "data.") {
132 empty = false
133 break
134 }
135
136 if !instanceDiff.Destroy {
137 empty = false
138 }
139 }
140 }
141 }
142
143 if !empty {
144 if step.ExpectNonEmptyPlan {
145 log.Printf("[INFO] Got non-empty plan, as expected:\n\n%s", p)
146 } else {
147 return state, fmt.Errorf(
148 "After applying this step and refreshing, "+
149 "the plan was not empty:\n\n%s", p)
150 }
151 }
152
153 // Made it here, but expected a non-empty plan, fail!
154 if step.ExpectNonEmptyPlan && (p.Diff == nil || p.Diff.Empty()) {
155 return state, fmt.Errorf("Expected a non-empty plan, but got an empty plan!")
156 }
157
158 // Made it here? Good job test step!
159 return state, nil
160}
diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/testing_import_state.go b/vendor/github.com/hashicorp/terraform/helper/resource/testing_import_state.go
new file mode 100644
index 0000000..28ad105
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/resource/testing_import_state.go
@@ -0,0 +1,141 @@
1package resource
2
3import (
4 "fmt"
5 "log"
6 "reflect"
7 "strings"
8
9 "github.com/davecgh/go-spew/spew"
10 "github.com/hashicorp/terraform/terraform"
11)
12
13// testStepImportState runs an import state test step
14func testStepImportState(
15 opts terraform.ContextOpts,
16 state *terraform.State,
17 step TestStep) (*terraform.State, error) {
18 // Determine the ID to import
19 importId := step.ImportStateId
20 if importId == "" {
21 resource, err := testResource(step, state)
22 if err != nil {
23 return state, err
24 }
25
26 importId = resource.Primary.ID
27 }
28 importPrefix := step.ImportStateIdPrefix
29 if importPrefix != "" {
30 importId = fmt.Sprintf("%s%s", importPrefix, importId)
31 }
32
33 // Set up the context. We initialize with an empty state. We use the
34 // full config for provider configurations.
35 mod, err := testModule(opts, step)
36 if err != nil {
37 return state, err
38 }
39
40 opts.Module = mod
41 opts.State = terraform.NewState()
42 ctx, err := terraform.NewContext(&opts)
43 if err != nil {
44 return state, err
45 }
46
47 // Do the import!
48 newState, err := ctx.Import(&terraform.ImportOpts{
49 // Set the module so that any provider config is loaded
50 Module: mod,
51
52 Targets: []*terraform.ImportTarget{
53 &terraform.ImportTarget{
54 Addr: step.ResourceName,
55 ID: importId,
56 },
57 },
58 })
59 if err != nil {
60 log.Printf("[ERROR] Test: ImportState failure: %s", err)
61 return state, err
62 }
63
64 // Go through the new state and verify
65 if step.ImportStateCheck != nil {
66 var states []*terraform.InstanceState
67 for _, r := range newState.RootModule().Resources {
68 if r.Primary != nil {
69 states = append(states, r.Primary)
70 }
71 }
72 if err := step.ImportStateCheck(states); err != nil {
73 return state, err
74 }
75 }
76
77 // Verify that all the states match
78 if step.ImportStateVerify {
79 new := newState.RootModule().Resources
80 old := state.RootModule().Resources
81 for _, r := range new {
82 // Find the existing resource
83 var oldR *terraform.ResourceState
84 for _, r2 := range old {
85 if r2.Primary != nil && r2.Primary.ID == r.Primary.ID && r2.Type == r.Type {
86 oldR = r2
87 break
88 }
89 }
90 if oldR == nil {
91 return state, fmt.Errorf(
92 "Failed state verification, resource with ID %s not found",
93 r.Primary.ID)
94 }
95
96 // Compare their attributes
97 actual := make(map[string]string)
98 for k, v := range r.Primary.Attributes {
99 actual[k] = v
100 }
101 expected := make(map[string]string)
102 for k, v := range oldR.Primary.Attributes {
103 expected[k] = v
104 }
105
106 // Remove fields we're ignoring
107 for _, v := range step.ImportStateVerifyIgnore {
108 for k := range actual {
109 if strings.HasPrefix(k, v) {
110 delete(actual, k)
111 }
112 }
113 for k := range expected {
114 if strings.HasPrefix(k, v) {
115 delete(expected, k)
116 }
117 }
118 }
119
120 if !reflect.DeepEqual(actual, expected) {
121 // Determine only the different attributes
122 for k, v := range expected {
123 if av, ok := actual[k]; ok && v == av {
124 delete(expected, k)
125 delete(actual, k)
126 }
127 }
128
129 spewConf := spew.NewDefaultConfig()
130 spewConf.SortKeys = true
131 return state, fmt.Errorf(
132 "ImportStateVerify attributes not equivalent. Difference is shown below. Top is actual, bottom is expected."+
133 "\n\n%s\n\n%s",
134 spewConf.Sdump(actual), spewConf.Sdump(expected))
135 }
136 }
137 }
138
139 // Return the old state (non-imported) so we don't change anything.
140 return state, nil
141}
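// Editor's illustrative sketch (not part of the vendored source): an import
// verification step as it might appear in a TestCase's Steps; the resource
// address is an assumption for illustration.
//
//	resource.TestStep{
//		ResourceName:      "example_thing.foo",
//		ImportState:       true,
//		ImportStateVerify: true,
//	}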
diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/wait.go b/vendor/github.com/hashicorp/terraform/helper/resource/wait.go
new file mode 100644
index 0000000..ca50e29
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/resource/wait.go
@@ -0,0 +1,84 @@
1package resource
2
3import (
4 "sync"
5 "time"
6)
7
8// Retry is a basic wrapper around StateChangeConf that will just retry
9// a function until it no longer returns an error.
10func Retry(timeout time.Duration, f RetryFunc) error {
11 // These are used to pull the error out of the function; need a mutex to
12 // avoid a data race.
13 var resultErr error
14 var resultErrMu sync.Mutex
15
16 c := &StateChangeConf{
17 Pending: []string{"retryableerror"},
18 Target: []string{"success"},
19 Timeout: timeout,
20 MinTimeout: 500 * time.Millisecond,
21 Refresh: func() (interface{}, string, error) {
22 rerr := f()
23
24 resultErrMu.Lock()
25 defer resultErrMu.Unlock()
26
27 if rerr == nil {
28 resultErr = nil
29 return 42, "success", nil
30 }
31
32 resultErr = rerr.Err
33
34 if rerr.Retryable {
35 return 42, "retryableerror", nil
36 }
37 return nil, "quit", rerr.Err
38 },
39 }
40
41 _, waitErr := c.WaitForState()
42
43 // Need to acquire the lock here to avoid a data race when using
44 // resultErr as the return value
45 resultErrMu.Lock()
46 defer resultErrMu.Unlock()
47
48 // resultErr may be nil because the wait timed out and resultErr was never
49 // set; this is still an error
50 if resultErr == nil {
51 return waitErr
52 }
53 // resultErr takes precedence over waitErr if both are set because it is
54 // more likely to be useful
55 return resultErr
56}
57
58// RetryFunc is the function retried until it succeeds.
59type RetryFunc func() *RetryError
60
61// RetryError is the required return type of RetryFunc. It forces client code
62// to choose whether or not a given error is retryable.
63type RetryError struct {
64 Err error
65 Retryable bool
66}
67
68// RetryableError is a helper to create a RetryError that's retryable from a
69// given error.
70func RetryableError(err error) *RetryError {
71 if err == nil {
72 return nil
73 }
74 return &RetryError{Err: err, Retryable: true}
75}
76
77// NonRetryableError is a helper to create a RetryError that's _not_ retryable
78// from a given error.
79func NonRetryableError(err error) *RetryError {
80 if err == nil {
81 return nil
82 }
83 return &RetryError{Err: err, Retryable: false}
84}
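// Editor's illustrative sketch (not part of the vendored source): retrying an
// eventually-consistent read with Retry; lookupWidget, errNotReady, and
// widgetID are assumptions for illustration.
//
//	err := resource.Retry(2*time.Minute, func() *resource.RetryError {
//		if err := lookupWidget(widgetID); err != nil {
//			if err == errNotReady {
//				return resource.RetryableError(err) // keep polling until timeout
//			}
//			return resource.NonRetryableError(err) // fail immediately
//		}
//		return nil // success: the wrapped StateChangeConf reaches its target
//	})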
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/README.md b/vendor/github.com/hashicorp/terraform/helper/schema/README.md
new file mode 100644
index 0000000..28c8362
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/README.md
@@ -0,0 +1,11 @@
1# Terraform Helper Lib: schema
2
3The `schema` package provides a high-level interface for writing resource
4providers for Terraform.
5
6If you're writing a resource provider, we recommend you use this package.
7
8The interface exposed by this package is much friendlier than trying to
9write to the Terraform API directly. The core Terraform API is low-level
10and built for maximum flexibility and control, whereas this library is built
11as a framework around that to more easily write common providers.
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/backend.go b/vendor/github.com/hashicorp/terraform/helper/schema/backend.go
new file mode 100644
index 0000000..a0729c0
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/backend.go
@@ -0,0 +1,94 @@
1package schema
2
3import (
4 "context"
5
6 "github.com/hashicorp/terraform/terraform"
7)
8
9// Backend represents a partial backend.Backend implementation and simplifies
10// the creation of configuration loading and validation.
11//
12// Unlike other schema structs such as Provider, this struct is meant to be
13// embedded within your actual implementation. It provides implementations
14// for Input, Validate, and Configure, and gives you a method for accessing the
15// configuration in the form of a ResourceData that you're expected to call
16// from the other implementation funcs.
17type Backend struct {
18 // Schema is the schema for the configuration of this backend. If this
19 // Backend has no configuration this can be omitted.
20 Schema map[string]*Schema
21
22 // ConfigureFunc is called to configure the backend. Use the
23 // FromContext* methods to extract information from the context.
24 // This can be nil, in which case nothing will be called but the
25 // config will still be stored.
26 ConfigureFunc func(context.Context) error
27
28 config *ResourceData
29}
30
31var (
32 backendConfigKey = contextKey("backend config")
33)
34
35// FromContextBackendConfig extracts a ResourceData with the configuration
36// from the context. This should only be called by Backend functions.
37func FromContextBackendConfig(ctx context.Context) *ResourceData {
38 return ctx.Value(backendConfigKey).(*ResourceData)
39}
40
41func (b *Backend) Input(
42 input terraform.UIInput,
43 c *terraform.ResourceConfig) (*terraform.ResourceConfig, error) {
44 if b == nil {
45 return c, nil
46 }
47
48 return schemaMap(b.Schema).Input(input, c)
49}
50
51func (b *Backend) Validate(c *terraform.ResourceConfig) ([]string, []error) {
52 if b == nil {
53 return nil, nil
54 }
55
56 return schemaMap(b.Schema).Validate(c)
57}
58
59func (b *Backend) Configure(c *terraform.ResourceConfig) error {
60 if b == nil {
61 return nil
62 }
63
64 sm := schemaMap(b.Schema)
65
66 // Get a ResourceData for this configuration. To do this, we actually
67 // generate an intermediary "diff" although that is never exposed.
68 diff, err := sm.Diff(nil, c)
69 if err != nil {
70 return err
71 }
72
73 data, err := sm.Data(nil, diff)
74 if err != nil {
75 return err
76 }
77 b.config = data
78
79 if b.ConfigureFunc != nil {
80 err = b.ConfigureFunc(context.WithValue(
81 context.Background(), backendConfigKey, data))
82 if err != nil {
83 return err
84 }
85 }
86
87 return nil
88}
89
90// Config returns the configuration. This is available after Configure is
91// called.
92func (b *Backend) Config() *ResourceData {
93 return b.config
94}
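// Editor's illustrative sketch (not part of the vendored source): embedding
// Backend in a partial backend implementation; the "address" attribute and
// the surrounding names are assumptions for illustration.
//
//	type myBackend struct {
//		*schema.Backend
//	}
//
//	func newBackend() *myBackend {
//		return &myBackend{
//			Backend: &schema.Backend{
//				Schema: map[string]*schema.Schema{
//					"address": {Type: schema.TypeString, Required: true},
//				},
//				ConfigureFunc: func(ctx context.Context) error {
//					data := schema.FromContextBackendConfig(ctx)
//					_ = data.Get("address") // validated config is available here
//					return nil
//				},
//			},
//		}
//	}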
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/data_source_resource_shim.go b/vendor/github.com/hashicorp/terraform/helper/schema/data_source_resource_shim.go
new file mode 100644
index 0000000..5a03d2d
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/data_source_resource_shim.go
@@ -0,0 +1,59 @@
1package schema
2
3import (
4 "fmt"
5)
6
7// DataSourceResourceShim takes a Resource instance describing a data source
8// (with a Read implementation and a Schema, at least) and returns a new
9// Resource instance with additional Create and Delete implementations that
10// allow the data source to be used as a resource.
11//
12// This is a backward-compatibility layer for data sources that were formerly
13// read-only resources before the data source concept was added. It should not
14// be used for any *new* data sources.
15//
16// The Read function for the data source *must* call d.SetId with a non-empty
17// id in order for this shim to function as expected.
18//
19// The provided Resource instance, and its schema, will be modified in-place
20// to make it suitable for use as a full resource.
21func DataSourceResourceShim(name string, dataSource *Resource) *Resource {
22 // Recursively, in-place adjust the schema so that it has ForceNew
23 // on any user-settable attribute.
24 dataSourceResourceShimAdjustSchema(dataSource.Schema)
25
26 dataSource.Create = CreateFunc(dataSource.Read)
27 dataSource.Delete = func(d *ResourceData, meta interface{}) error {
28 d.SetId("")
29 return nil
30 }
31 dataSource.Update = nil // should already be nil, but let's make sure
32
33 // FIXME: Link to some further docs either on the website or in the
34 // changelog, once such a thing exists.
35 dataSource.deprecationMessage = fmt.Sprintf(
36 "using %s as a resource is deprecated; consider using the data source instead",
37 name,
38 )
39
40 return dataSource
41}
42
43func dataSourceResourceShimAdjustSchema(schema map[string]*Schema) {
44 for _, s := range schema {
45 // If the attribute is configurable then it must be ForceNew,
46 // since we have no Update implementation.
47 if s.Required || s.Optional {
48 s.ForceNew = true
49 }
50
51 // If the attribute is a nested resource, we need to recursively
52 // apply these same adjustments to it.
53 if s.Elem != nil {
54 if r, ok := s.Elem.(*Resource); ok {
55 dataSourceResourceShimAdjustSchema(r.Schema)
56 }
57 }
58 }
59}
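// Editor's illustrative sketch (not part of the vendored source): exposing a
// legacy read-only resource as a shim over its data source inside a provider's
// ResourcesMap; the names and constructor are assumptions for illustration.
//
//	ResourcesMap: map[string]*schema.Resource{
//		"example_image": schema.DataSourceResourceShim(
//			"example_image",
//			dataSourceExampleImage(), // hypothetical data source constructor
//		),
//	},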
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/equal.go b/vendor/github.com/hashicorp/terraform/helper/schema/equal.go
new file mode 100644
index 0000000..d5e20e0
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/equal.go
@@ -0,0 +1,6 @@
1package schema
2
3// Equal is an interface that checks for deep equality between two objects.
4type Equal interface {
5 Equal(interface{}) bool
6}
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/field_reader.go b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader.go
new file mode 100644
index 0000000..1660a67
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader.go
@@ -0,0 +1,334 @@
1package schema
2
3import (
4 "fmt"
5 "strconv"
6)
7
8// FieldReaders are responsible for decoding fields out of data into
9// the proper typed representation. ResourceData uses this to query data
10// out of multiple sources: config, state, diffs, etc.
11type FieldReader interface {
12 ReadField([]string) (FieldReadResult, error)
13}
14
15// FieldReadResult encapsulates all the resulting data from reading
16// a field.
17type FieldReadResult struct {
18 // Value is the actual read value. ValueProcessed holds an optionally
19 // post-processed form of Value, when the reader in use provides one.
22 Value interface{}
23 ValueProcessed interface{}
24
25 // Exists is true if the field was found in the data. False means
26 // it wasn't found, assuming there was no error.
27 Exists bool
28
29 // Computed is true if the field was found but the value
30 // is computed.
31 Computed bool
32}
33
34// ValueOrZero returns the value of this result or the zero value of the
35// schema type, ensuring a consistent non-nil return value.
36func (r *FieldReadResult) ValueOrZero(s *Schema) interface{} {
37 if r.Value != nil {
38 return r.Value
39 }
40
41 return s.ZeroValue()
42}
43
44// addrToSchema finds the final element schema for the given address
45// and the given schema. It returns all the schemas that led to the final
46// schema. These are in order of the address (out to in).
47func addrToSchema(addr []string, schemaMap map[string]*Schema) []*Schema {
48 current := &Schema{
49 Type: typeObject,
50 Elem: schemaMap,
51 }
52
53 // If we aren't given an address, then the user is requesting the
54 // full object, so we return the special value which is the full object.
55 if len(addr) == 0 {
56 return []*Schema{current}
57 }
58
59 result := make([]*Schema, 0, len(addr))
60 for len(addr) > 0 {
61 k := addr[0]
62 addr = addr[1:]
63
64 REPEAT:
65 // We want to trim off the first "typeObject" since it's not a
66 // real lookup that people do. i.e. []string{"foo"} in a structure
67 // isn't {typeObject, typeString}, it's just a {typeString}.
68 if len(result) > 0 || current.Type != typeObject {
69 result = append(result, current)
70 }
71
72 switch t := current.Type; t {
73 case TypeBool, TypeInt, TypeFloat, TypeString:
74 if len(addr) > 0 {
75 return nil
76 }
77 case TypeList, TypeSet:
78 isIndex := len(addr) > 0 && addr[0] == "#"
79
80 switch v := current.Elem.(type) {
81 case *Resource:
82 current = &Schema{
83 Type: typeObject,
84 Elem: v.Schema,
85 }
86 case *Schema:
87 current = v
88 case ValueType:
89 current = &Schema{Type: v}
90 default:
91 // we may not know the Elem type and are just looking for the
92 // index
93 if isIndex {
94 break
95 }
96
97 if len(addr) == 0 {
98 // we've processed the address, so return what we've
99 // collected
100 return result
101 }
102
103 if len(addr) == 1 {
104 if _, err := strconv.Atoi(addr[0]); err == nil {
105 // we're indexing a value without a schema. This can
106 // happen if the list is nested in another schema type.
107 // Default to a TypeString like we do with a map
108 current = &Schema{Type: TypeString}
109 break
110 }
111 }
112
113 return nil
114 }
115
116 // If we only have one more thing and the next thing
117 // is a #, then we're accessing the index which is always
118 // an int.
119 if isIndex {
120 current = &Schema{Type: TypeInt}
121 break
122 }
123
124 case TypeMap:
125 if len(addr) > 0 {
126 switch v := current.Elem.(type) {
127 case ValueType:
128 current = &Schema{Type: v}
129 default:
130 // maps default to string values. This is all we can have
131 // if this is nested in another list or map.
132 current = &Schema{Type: TypeString}
133 }
134 }
135 case typeObject:
136 // If we're already in the object, then we want to handle Sets
137 // and Lists specially. Basically, their next key is the lookup
138 // key (the set value or the list element). For these scenarios,
139 // we just want to skip it and move to the next element if there
140 // is one.
141 if len(result) > 0 {
142 lastType := result[len(result)-2].Type
143 if lastType == TypeSet || lastType == TypeList {
144 if len(addr) == 0 {
145 break
146 }
147
148 k = addr[0]
149 addr = addr[1:]
150 }
151 }
152
153 m := current.Elem.(map[string]*Schema)
154 val, ok := m[k]
155 if !ok {
156 return nil
157 }
158
159 current = val
160 goto REPEAT
161 }
162 }
163
164 return result
165}
166
167// readListField is a generic method for reading a list field out of
168// a FieldReader. It does this based on the assumption that there is a key
169// "foo.#" for a list "foo" and that the indexes are "foo.0", "foo.1", etc.
170// after that point.
171func readListField(
172 r FieldReader, addr []string, schema *Schema) (FieldReadResult, error) {
173 addrPadded := make([]string, len(addr)+1)
174 copy(addrPadded, addr)
175 addrPadded[len(addrPadded)-1] = "#"
176
177 // Get the number of elements in the list
178 countResult, err := r.ReadField(addrPadded)
179 if err != nil {
180 return FieldReadResult{}, err
181 }
182 if !countResult.Exists {
183 // No count means we have no list
184 countResult.Value = 0
185 }
186
187 // If we have an empty list, then return an empty list
188 if countResult.Computed || countResult.Value.(int) == 0 {
189 return FieldReadResult{
190 Value: []interface{}{},
191 Exists: countResult.Exists,
192 Computed: countResult.Computed,
193 }, nil
194 }
195
196 // Go through each index and get the item value out of it
197 result := make([]interface{}, countResult.Value.(int))
198 for i := range result {
199 is := strconv.FormatInt(int64(i), 10)
200 addrPadded[len(addrPadded)-1] = is
201 rawResult, err := r.ReadField(addrPadded)
202 if err != nil {
203 return FieldReadResult{}, err
204 }
205 if !rawResult.Exists {
206 // This should never happen, because by the time the data
207 // gets to the FieldReaders, all the defaults should be set by
208 // Schema.
209 rawResult.Value = nil
210 }
211
212 result[i] = rawResult.Value
213 }
214
215 return FieldReadResult{
216 Value: result,
217 Exists: true,
218 }, nil
219}
220
221// readObjectField is a generic method for reading objects out of FieldReaders
222// based on the assumption that building an address of []string{k, FIELD}
223// will result in the proper field data.
224func readObjectField(
225 r FieldReader,
226 addr []string,
227 schema map[string]*Schema) (FieldReadResult, error) {
228 result := make(map[string]interface{})
229 exists := false
230 for field, s := range schema {
231 addrRead := make([]string, len(addr), len(addr)+1)
232 copy(addrRead, addr)
233 addrRead = append(addrRead, field)
234 rawResult, err := r.ReadField(addrRead)
235 if err != nil {
236 return FieldReadResult{}, err
237 }
238 if rawResult.Exists {
239 exists = true
240 }
241
242 result[field] = rawResult.ValueOrZero(s)
243 }
244
245 return FieldReadResult{
246 Value: result,
247 Exists: exists,
248 }, nil
249}
250
251// convert map values to the proper primitive type based on schema.Elem
252func mapValuesToPrimitive(m map[string]interface{}, schema *Schema) error {
253
254 elemType := TypeString
255 if et, ok := schema.Elem.(ValueType); ok {
256 elemType = et
257 }
258
259 switch elemType {
260 case TypeInt, TypeFloat, TypeBool:
261 for k, v := range m {
262 vs, ok := v.(string)
263 if !ok {
264 continue
265 }
266
267 v, err := stringToPrimitive(vs, false, &Schema{Type: elemType})
268 if err != nil {
269 return err
270 }
271
272 m[k] = v
273 }
274 }
275 return nil
276}
277
278func stringToPrimitive(
279 value string, computed bool, schema *Schema) (interface{}, error) {
280 var returnVal interface{}
281 switch schema.Type {
282 case TypeBool:
283 if value == "" {
284 returnVal = false
285 break
286 }
287 if computed {
288 break
289 }
290
291 v, err := strconv.ParseBool(value)
292 if err != nil {
293 return nil, err
294 }
295
296 returnVal = v
297 case TypeFloat:
298 if value == "" {
299 returnVal = 0.0
300 break
301 }
302 if computed {
303 break
304 }
305
306 v, err := strconv.ParseFloat(value, 64)
307 if err != nil {
308 return nil, err
309 }
310
311 returnVal = v
312 case TypeInt:
313 if value == "" {
314 returnVal = 0
315 break
316 }
317 if computed {
318 break
319 }
320
321 v, err := strconv.ParseInt(value, 0, 0)
322 if err != nil {
323 return nil, err
324 }
325
326 returnVal = int(v)
327 case TypeString:
328 returnVal = value
329 default:
330 panic(fmt.Sprintf("Unknown type: %s", schema.Type))
331 }
332
333 return returnVal, nil
334}
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_config.go b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_config.go
new file mode 100644
index 0000000..f958bbc
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_config.go
@@ -0,0 +1,333 @@
1package schema
2
3import (
4 "fmt"
5 "strconv"
6 "strings"
7 "sync"
8
9 "github.com/hashicorp/terraform/terraform"
10 "github.com/mitchellh/mapstructure"
11)
12
13// ConfigFieldReader reads fields out of an untyped map[string]string to the
14// best of its ability. It also applies defaults from the Schema. (The other
15// field readers do not need default handling because they source fully
16// populated data structures.)
17type ConfigFieldReader struct {
18 Config *terraform.ResourceConfig
19 Schema map[string]*Schema
20
21 indexMaps map[string]map[string]int
22 once sync.Once
23}
24
25func (r *ConfigFieldReader) ReadField(address []string) (FieldReadResult, error) {
26 r.once.Do(func() { r.indexMaps = make(map[string]map[string]int) })
27 return r.readField(address, false)
28}
29
30func (r *ConfigFieldReader) readField(
31 address []string, nested bool) (FieldReadResult, error) {
32 schemaList := addrToSchema(address, r.Schema)
33 if len(schemaList) == 0 {
34 return FieldReadResult{}, nil
35 }
36
37 if !nested {
38 // If we have a set anywhere in the address, then we need to
39 // read that set out in order and actually replace that part of
40 // the address with the real list index. i.e. set.50 might actually
41 // map to set.12 in the config, since it is in list order in the
42 // config, not indexed by set value.
43 for i, v := range schemaList {
44 // Sets are the only thing that cause this issue.
45 if v.Type != TypeSet {
46 continue
47 }
48
49 // If we're at the end of the list, then we don't have to worry
50 // about this because we're just requesting the whole set.
51 if i == len(schemaList)-1 {
52 continue
53 }
54
55 // If we're looking for the count, then ignore...
56 if address[i+1] == "#" {
57 continue
58 }
59
60 indexMap, ok := r.indexMaps[strings.Join(address[:i+1], ".")]
61 if !ok {
62 // Get the set so we can get the index map that tells us the
63 // mapping of the hash code to the list index
64 _, err := r.readSet(address[:i+1], v)
65 if err != nil {
66 return FieldReadResult{}, err
67 }
68 indexMap = r.indexMaps[strings.Join(address[:i+1], ".")]
69 }
70
71 index, ok := indexMap[address[i+1]]
72 if !ok {
73 return FieldReadResult{}, nil
74 }
75
76 address[i+1] = strconv.FormatInt(int64(index), 10)
77 }
78 }
79
80 k := strings.Join(address, ".")
81 schema := schemaList[len(schemaList)-1]
82
83 // If we're getting the single element of a promoted list, then
84 // check to see if we have a single element we need to promote.
85 if address[len(address)-1] == "0" && len(schemaList) > 1 {
86 lastSchema := schemaList[len(schemaList)-2]
87 if lastSchema.Type == TypeList && lastSchema.PromoteSingle {
88 k := strings.Join(address[:len(address)-1], ".")
89 result, err := r.readPrimitive(k, schema)
90 if err == nil {
91 return result, nil
92 }
93 }
94 }
95
96 switch schema.Type {
97 case TypeBool, TypeFloat, TypeInt, TypeString:
98 return r.readPrimitive(k, schema)
99 case TypeList:
100 // If we support promotion then we first check if we have a lone
101 // value that we must promote, i.e. a primitive that appears on its
102 // own rather than wrapped in a list.
103 if schema.PromoteSingle {
104 result, err := r.readPrimitive(k, schema.Elem.(*Schema))
105 if err == nil && result.Exists {
106 result.Value = []interface{}{result.Value}
107 return result, nil
108 }
109 }
110
111 return readListField(&nestedConfigFieldReader{r}, address, schema)
112 case TypeMap:
113 return r.readMap(k, schema)
114 case TypeSet:
115 return r.readSet(address, schema)
116 case typeObject:
117 return readObjectField(
118 &nestedConfigFieldReader{r},
119 address, schema.Elem.(map[string]*Schema))
120 default:
121 panic(fmt.Sprintf("Unknown type: %s", schema.Type))
122 }
123}
124
125func (r *ConfigFieldReader) readMap(k string, schema *Schema) (FieldReadResult, error) {
126 // We want both the raw value and the interpolated. We use the interpolated
127 // to store actual values and we use the raw one to check for
128 // computed keys. Actual values are obtained in the switch, depending on
129 // the type of the raw value.
130 mraw, ok := r.Config.GetRaw(k)
131 if !ok {
132 // check if this is from an interpolated field by seeing if it exists
133 // in the config
134 _, ok := r.Config.Get(k)
135 if !ok {
136 // this really doesn't exist
137 return FieldReadResult{}, nil
138 }
139
140 // We couldn't fetch the value from a nested data structure, so treat the
141 // raw value as an interpolation string. The mraw value is only used
142 // for the type switch below.
143 mraw = "${INTERPOLATED}"
144 }
145
146 result := make(map[string]interface{})
147 computed := false
148 switch m := mraw.(type) {
149 case string:
150 // This is a map which has come out of an interpolated variable, so we
151 // can just get the value directly from config. Values cannot be computed
152 // currently.
153 v, _ := r.Config.Get(k)
154
155 // If this isn't a map[string]interface, it must be computed.
156 mapV, ok := v.(map[string]interface{})
157 if !ok {
158 return FieldReadResult{
159 Exists: true,
160 Computed: true,
161 }, nil
162 }
163
164 // Otherwise we can proceed as usual.
165 for i, iv := range mapV {
166 result[i] = iv
167 }
168 case []interface{}:
169 for i, innerRaw := range m {
170 for ik := range innerRaw.(map[string]interface{}) {
171 key := fmt.Sprintf("%s.%d.%s", k, i, ik)
172 if r.Config.IsComputed(key) {
173 computed = true
174 break
175 }
176
177 v, _ := r.Config.Get(key)
178 result[ik] = v
179 }
180 }
181 case []map[string]interface{}:
182 for i, innerRaw := range m {
183 for ik := range innerRaw {
184 key := fmt.Sprintf("%s.%d.%s", k, i, ik)
185 if r.Config.IsComputed(key) {
186 computed = true
187 break
188 }
189
190 v, _ := r.Config.Get(key)
191 result[ik] = v
192 }
193 }
194 case map[string]interface{}:
195 for ik := range m {
196 key := fmt.Sprintf("%s.%s", k, ik)
197 if r.Config.IsComputed(key) {
198 computed = true
199 break
200 }
201
202 v, _ := r.Config.Get(key)
203 result[ik] = v
204 }
205 default:
206 panic(fmt.Sprintf("unknown type: %#v", mraw))
207 }
208
209 err := mapValuesToPrimitive(result, schema)
210 if err != nil {
211 return FieldReadResult{}, err
212 }
213
214 var value interface{}
215 if !computed {
216 value = result
217 }
218
219 return FieldReadResult{
220 Value: value,
221 Exists: true,
222 Computed: computed,
223 }, nil
224}
225
226func (r *ConfigFieldReader) readPrimitive(
227 k string, schema *Schema) (FieldReadResult, error) {
228 raw, ok := r.Config.Get(k)
229 if !ok {
230 // Nothing in config, but we might still have a default from the schema
231 var err error
232 raw, err = schema.DefaultValue()
233 if err != nil {
234 return FieldReadResult{}, fmt.Errorf("%s, error loading default: %s", k, err)
235 }
236
237 if raw == nil {
238 return FieldReadResult{}, nil
239 }
240 }
241
242 var result string
243 if err := mapstructure.WeakDecode(raw, &result); err != nil {
244 return FieldReadResult{}, err
245 }
246
247 computed := r.Config.IsComputed(k)
248 returnVal, err := stringToPrimitive(result, computed, schema)
249 if err != nil {
250 return FieldReadResult{}, err
251 }
252
253 return FieldReadResult{
254 Value: returnVal,
255 Exists: true,
256 Computed: computed,
257 }, nil
258}
259
260func (r *ConfigFieldReader) readSet(
261 address []string, schema *Schema) (FieldReadResult, error) {
262 indexMap := make(map[string]int)
263 // Create the set that will be our result
264 set := schema.ZeroValue().(*Set)
265
266 raw, err := readListField(&nestedConfigFieldReader{r}, address, schema)
267 if err != nil {
268 return FieldReadResult{}, err
269 }
270 if !raw.Exists {
271 return FieldReadResult{Value: set}, nil
272 }
273
274 // If the list is computed, the set is necessarily computed.
275 if raw.Computed {
276 return FieldReadResult{
277 Value: set,
278 Exists: true,
279 Computed: raw.Computed,
280 }, nil
281 }
282
283 // Build up the set from the list elements
284 for i, v := range raw.Value.([]interface{}) {
285 // Check if any of the keys in this item are computed
286 computed := r.hasComputedSubKeys(
287 fmt.Sprintf("%s.%d", strings.Join(address, "."), i), schema)
288
289 code := set.add(v, computed)
290 indexMap[code] = i
291 }
292
293 r.indexMaps[strings.Join(address, ".")] = indexMap
294
295 return FieldReadResult{
296 Value: set,
297 Exists: true,
298 }, nil
299}
300
301// hasComputedSubKeys walks through a schema and returns whether or not the
302// given key contains any subkeys that are computed.
303func (r *ConfigFieldReader) hasComputedSubKeys(key string, schema *Schema) bool {
304 prefix := key + "."
305
306 switch t := schema.Elem.(type) {
307 case *Resource:
308 for k, schema := range t.Schema {
309 if r.Config.IsComputed(prefix + k) {
310 return true
311 }
312
313 if r.hasComputedSubKeys(prefix+k, schema) {
314 return true
315 }
316 }
317 }
318
319 return false
320}
321
322// nestedConfigFieldReader is a funny little thing that just wraps a
323// ConfigFieldReader to call readField when ReadField is called so that
324// we don't recalculate the set rewrites in the address, which leads to
325// an infinite loop.
326type nestedConfigFieldReader struct {
327 Reader *ConfigFieldReader
328}
329
330func (r *nestedConfigFieldReader) ReadField(
331 address []string) (FieldReadResult, error) {
332 return r.Reader.readField(address, true)
333}
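
A minimal sketch of driving ConfigFieldReader directly, assuming the vendored config and terraform packages shown elsewhere in this diff; note how the schema Default fills in a key missing from the configuration.

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/terraform/config"
	"github.com/hashicorp/terraform/helper/schema"
	"github.com/hashicorp/terraform/terraform"
)

func main() {
	raw, err := config.NewRawConfig(map[string]interface{}{"name": "web"})
	if err != nil {
		log.Fatal(err)
	}

	r := &schema.ConfigFieldReader{
		Config: terraform.NewResourceConfig(raw),
		Schema: map[string]*schema.Schema{
			"name":   {Type: schema.TypeString},
			"region": {Type: schema.TypeString, Default: "us-east-1"},
		},
	}

	name, _ := r.ReadField([]string{"name"})
	region, _ := r.ReadField([]string{"region"})
	fmt.Println(name.Value, region.Value) // web us-east-1 (default applied)
}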
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_diff.go b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_diff.go
new file mode 100644
index 0000000..16bbae2
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_diff.go
@@ -0,0 +1,208 @@
1package schema
2
3import (
4 "fmt"
5 "strings"
6
7 "github.com/hashicorp/terraform/terraform"
8 "github.com/mitchellh/mapstructure"
9)
10
11// DiffFieldReader reads fields out of a diff structure.
12//
13// It also requires access to a Reader that reads fields from the structure
14// that the diff was derived from. This is usually the state. This is required
15// because a diff on its own doesn't have complete data about full objects
16// such as maps.
17//
18// The Source MUST be the data that the diff was derived from. If it isn't,
19// the behavior of this struct is undefined.
20//
21// Reading fields from a DiffFieldReader is identical to reading from
22// Source except the diff will be applied to the end result.
23//
24// The "Exists" field on the result will be set to true if the complete
25// field exists, whether it's from the source, diff, or a combination of both.
26// It cannot be determined whether a retrieved value is composed of
27// diff elements.
28type DiffFieldReader struct {
29 Diff *terraform.InstanceDiff
30 Source FieldReader
31 Schema map[string]*Schema
32}
33
34func (r *DiffFieldReader) ReadField(address []string) (FieldReadResult, error) {
35 schemaList := addrToSchema(address, r.Schema)
36 if len(schemaList) == 0 {
37 return FieldReadResult{}, nil
38 }
39
40 schema := schemaList[len(schemaList)-1]
41 switch schema.Type {
42 case TypeBool, TypeInt, TypeFloat, TypeString:
43 return r.readPrimitive(address, schema)
44 case TypeList:
45 return readListField(r, address, schema)
46 case TypeMap:
47 return r.readMap(address, schema)
48 case TypeSet:
49 return r.readSet(address, schema)
50 case typeObject:
51 return readObjectField(r, address, schema.Elem.(map[string]*Schema))
52 default:
53 panic(fmt.Sprintf("Unknown type: %#v", schema.Type))
54 }
55}
56
57func (r *DiffFieldReader) readMap(
58 address []string, schema *Schema) (FieldReadResult, error) {
59 result := make(map[string]interface{})
60 resultSet := false
61
62 // First read the map from the underlying source
63 source, err := r.Source.ReadField(address)
64 if err != nil {
65 return FieldReadResult{}, err
66 }
67 if source.Exists {
68 result = source.Value.(map[string]interface{})
69 resultSet = true
70 }
71
72 // Next, read all the elements we have in our diff, and apply
73 // the diff to our result.
74 prefix := strings.Join(address, ".") + "."
75 for k, v := range r.Diff.Attributes {
76 if !strings.HasPrefix(k, prefix) {
77 continue
78 }
79 if strings.HasPrefix(k, prefix+"%") {
80 // Ignore the count field
81 continue
82 }
83
84 resultSet = true
85
86 k = k[len(prefix):]
87 if v.NewRemoved {
88 delete(result, k)
89 continue
90 }
91
92 result[k] = v.New
93 }
94
95 err = mapValuesToPrimitive(result, schema)
96 if err != nil {
97 return FieldReadResult{}, err
98 }
99
100 var resultVal interface{}
101 if resultSet {
102 resultVal = result
103 }
104
105 return FieldReadResult{
106 Value: resultVal,
107 Exists: resultSet,
108 }, nil
109}
110
111func (r *DiffFieldReader) readPrimitive(
112 address []string, schema *Schema) (FieldReadResult, error) {
113 result, err := r.Source.ReadField(address)
114 if err != nil {
115 return FieldReadResult{}, err
116 }
117
118 attrD, ok := r.Diff.Attributes[strings.Join(address, ".")]
119 if !ok {
120 return result, nil
121 }
122
123 var resultVal string
124 if !attrD.NewComputed {
125 resultVal = attrD.New
126 if attrD.NewExtra != nil {
127 result.ValueProcessed = resultVal
128 if err := mapstructure.WeakDecode(attrD.NewExtra, &resultVal); err != nil {
129 return FieldReadResult{}, err
130 }
131 }
132 }
133
134 result.Computed = attrD.NewComputed
135 result.Exists = true
136 result.Value, err = stringToPrimitive(resultVal, false, schema)
137 if err != nil {
138 return FieldReadResult{}, err
139 }
140
141 return result, nil
142}
143
144func (r *DiffFieldReader) readSet(
145 address []string, schema *Schema) (FieldReadResult, error) {
146 prefix := strings.Join(address, ".") + "."
147
148 // Create the set that will be our result
149 set := schema.ZeroValue().(*Set)
150
151 // Go through the map and find all the set items
152 for k, d := range r.Diff.Attributes {
153 if d.NewRemoved {
154 // If the field is removed, we always ignore it
155 continue
156 }
157 if !strings.HasPrefix(k, prefix) {
158 continue
159 }
160 if strings.HasSuffix(k, "#") {
161 // Ignore any count field
162 continue
163 }
164
165 // Split the key, since it might be a sub-object like "idx.field"
166 parts := strings.Split(k[len(prefix):], ".")
167 idx := parts[0]
168
169 raw, err := r.ReadField(append(address, idx))
170 if err != nil {
171 return FieldReadResult{}, err
172 }
173 if !raw.Exists {
174 // This shouldn't happen because we just verified it does exist
175 panic("missing field in set: " + k + "." + idx)
176 }
177
178 set.Add(raw.Value)
179 }
180
181 // Determine if the set "exists". It exists if there are items or if
182 // the diff explicitly wanted it empty.
183 exists := set.Len() > 0
184 if !exists {
185 // We could check if the diff value is "0" here but I think the
186 // existence of "#" on its own is enough to show it existed. This
187 // protects us in the future from the zero value changing from
188 // "0" to "" breaking us (if that were to happen).
189 if _, ok := r.Diff.Attributes[prefix+"#"]; ok {
190 exists = true
191 }
192 }
193
194 if !exists {
195 result, err := r.Source.ReadField(address)
196 if err != nil {
197 return FieldReadResult{}, err
198 }
199 if result.Exists {
200 return result, nil
201 }
202 }
203
204 return FieldReadResult{
205 Value: set,
206 Exists: exists,
207 }, nil
208}
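
A small sketch of the overlay behavior described above: the source (state) reader supplies "web", and the diff replaces it with "db".

package main

import (
	"fmt"

	"github.com/hashicorp/terraform/helper/schema"
	"github.com/hashicorp/terraform/terraform"
)

func main() {
	s := map[string]*schema.Schema{"name": {Type: schema.TypeString}}

	r := &schema.DiffFieldReader{
		Schema: s,
		Source: &schema.MapFieldReader{
			Schema: s,
			Map:    schema.BasicMapReader(map[string]string{"name": "web"}),
		},
		Diff: &terraform.InstanceDiff{
			Attributes: map[string]*terraform.ResourceAttrDiff{
				"name": {Old: "web", New: "db"},
			},
		},
	}

	res, _ := r.ReadField([]string{"name"})
	fmt.Println(res.Value) // db: the diff value wins over the source
}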
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_map.go b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_map.go
new file mode 100644
index 0000000..9533981
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_map.go
@@ -0,0 +1,232 @@
1package schema
2
3import (
4 "fmt"
5 "strings"
6)
7
8// MapFieldReader reads fields out of an untyped map[string]string to
9// the best of its ability.
10type MapFieldReader struct {
11 Map MapReader
12 Schema map[string]*Schema
13}
14
15func (r *MapFieldReader) ReadField(address []string) (FieldReadResult, error) {
16 k := strings.Join(address, ".")
17 schemaList := addrToSchema(address, r.Schema)
18 if len(schemaList) == 0 {
19 return FieldReadResult{}, nil
20 }
21
22 schema := schemaList[len(schemaList)-1]
23 switch schema.Type {
24 case TypeBool, TypeInt, TypeFloat, TypeString:
25 return r.readPrimitive(address, schema)
26 case TypeList:
27 return readListField(r, address, schema)
28 case TypeMap:
29 return r.readMap(k, schema)
30 case TypeSet:
31 return r.readSet(address, schema)
32 case typeObject:
33 return readObjectField(r, address, schema.Elem.(map[string]*Schema))
34 default:
35 panic(fmt.Sprintf("Unknown type: %s", schema.Type))
36 }
37}
38
39func (r *MapFieldReader) readMap(k string, schema *Schema) (FieldReadResult, error) {
40 result := make(map[string]interface{})
41 resultSet := false
42
43 // If the name of the map field is directly in the map with an
44 // empty string, it means that the map is being deleted, so mark
45 // that it is set.
46 if v, ok := r.Map.Access(k); ok && v == "" {
47 resultSet = true
48 }
49
50 prefix := k + "."
51 r.Map.Range(func(k, v string) bool {
52 if strings.HasPrefix(k, prefix) {
53 resultSet = true
54
55 key := k[len(prefix):]
56 if key != "%" && key != "#" {
57 result[key] = v
58 }
59 }
60
61 return true
62 })
63
64 err := mapValuesToPrimitive(result, schema)
65 if err != nil {
66 return FieldReadResult{}, err
67 }
68
69 var resultVal interface{}
70 if resultSet {
71 resultVal = result
72 }
73
74 return FieldReadResult{
75 Value: resultVal,
76 Exists: resultSet,
77 }, nil
78}
79
80func (r *MapFieldReader) readPrimitive(
81 address []string, schema *Schema) (FieldReadResult, error) {
82 k := strings.Join(address, ".")
83 result, ok := r.Map.Access(k)
84 if !ok {
85 return FieldReadResult{}, nil
86 }
87
88 returnVal, err := stringToPrimitive(result, false, schema)
89 if err != nil {
90 return FieldReadResult{}, err
91 }
92
93 return FieldReadResult{
94 Value: returnVal,
95 Exists: true,
96 }, nil
97}
98
99func (r *MapFieldReader) readSet(
100 address []string, schema *Schema) (FieldReadResult, error) {
101 // Get the number of elements in the list
102 countRaw, err := r.readPrimitive(
103 append(address, "#"), &Schema{Type: TypeInt})
104 if err != nil {
105 return FieldReadResult{}, err
106 }
107 if !countRaw.Exists {
108 // No count means we have no set
109 countRaw.Value = 0
110 }
111
112 // Create the set that will be our result
113 set := schema.ZeroValue().(*Set)
114
115 // If the count is computed or zero, return an empty set
116 if countRaw.Computed || countRaw.Value.(int) == 0 {
117 return FieldReadResult{
118 Value: set,
119 Exists: countRaw.Exists,
120 Computed: countRaw.Computed,
121 }, nil
122 }
123
124 // Go through the map and find all the set items
125 prefix := strings.Join(address, ".") + "."
126 countExpected := countRaw.Value.(int)
127 countActual := make(map[string]struct{})
128 completed := r.Map.Range(func(k, _ string) bool {
129 if !strings.HasPrefix(k, prefix) {
130 return true
131 }
132 if strings.HasPrefix(k, prefix+"#") {
133 // Ignore the count field
134 return true
135 }
136
137 // Split the key, since it might be a sub-object like "idx.field"
138 parts := strings.Split(k[len(prefix):], ".")
139 idx := parts[0]
140
141 var raw FieldReadResult
142 raw, err = r.ReadField(append(address, idx))
143 if err != nil {
144 return false
145 }
146 if !raw.Exists {
147 // This shouldn't happen because we just verified it does exist
148 panic("missing field in set: " + k + "." + idx)
149 }
150
151 set.Add(raw.Value)
152
153 // Due to the way multimap readers work, if we've seen the number
154 // of fields we expect, then exit so that we don't read later values.
155 // For example: the "set" map might have "ports.#", "ports.0", and
156 // "ports.1", but the "state" map might have those plus "ports.2".
157 // We don't want "ports.2".
158 countActual[idx] = struct{}{}
159 if len(countActual) >= countExpected {
160 return false
161 }
162
163 return true
164 })
165 if !completed && err != nil {
166 return FieldReadResult{}, err
167 }
168
169 return FieldReadResult{
170 Value: set,
171 Exists: true,
172 }, nil
173}
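
A sketch of reading a set back out of flatmap-style state; the identity hash function is purely illustrative (real schemas typically rely on a dedicated SchemaSetFunc).

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/terraform/helper/schema"
)

func main() {
	s := map[string]*schema.Schema{
		"ports": {
			Type: schema.TypeSet,
			Elem: &schema.Schema{Type: schema.TypeInt},
			// Identity hash, for illustration only.
			Set: func(v interface{}) int { return v.(int) },
		},
	}

	r := &schema.MapFieldReader{
		Schema: s,
		Map: schema.BasicMapReader(map[string]string{
			"ports.#":   "2",
			"ports.80":  "80",
			"ports.443": "443",
		}),
	}

	res, err := r.ReadField([]string{"ports"})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(res.Value.(*schema.Set).List()) // both ports, ordered by hash code
}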
174
175// MapReader is an interface that is given to MapFieldReader for accessing
176// a "map". This can be used to have alternate implementations. For a basic
177// map[string]string, use BasicMapReader.
178type MapReader interface {
179 Access(string) (string, bool)
180 Range(func(string, string) bool) bool
181}
182
183// BasicMapReader implements MapReader for a single map.
184type BasicMapReader map[string]string
185
186func (r BasicMapReader) Access(k string) (string, bool) {
187 v, ok := r[k]
188 return v, ok
189}
190
191func (r BasicMapReader) Range(f func(string, string) bool) bool {
192 for k, v := range r {
193 if cont := f(k, v); !cont {
194 return false
195 }
196 }
197
198 return true
199}
200
201// MultiMapReader reads over multiple maps, preferring keys that are
202// found earlier (lower number index) vs. later (higher number index).
203type MultiMapReader []map[string]string
204
205func (r MultiMapReader) Access(k string) (string, bool) {
206 for _, m := range r {
207 if v, ok := m[k]; ok {
208 return v, ok
209 }
210 }
211
212 return "", false
213}
214
215func (r MultiMapReader) Range(f func(string, string) bool) bool {
216 done := make(map[string]struct{})
217 for _, m := range r {
218 for k, v := range m {
219 if _, ok := done[k]; ok {
220 continue
221 }
222
223 if cont := f(k, v); !cont {
224 return false
225 }
226
227 done[k] = struct{}{}
228 }
229 }
230
231 return true
232}
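
The precedence rule in a few lines: earlier maps win, and later maps only fill in keys the earlier ones lack.

package main

import (
	"fmt"

	"github.com/hashicorp/terraform/helper/schema"
)

func main() {
	mm := schema.MultiMapReader{
		{"name": "from-diff"}, // earlier maps take precedence
		{"name": "from-state", "other": "fallback"},
	}

	v, _ := mm.Access("name")
	w, _ := mm.Access("other")
	fmt.Println(v, w) // from-diff fallback
}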
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_multi.go b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_multi.go
new file mode 100644
index 0000000..89ad3a8
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_multi.go
@@ -0,0 +1,63 @@
1package schema
2
3import (
4 "fmt"
5)
6
7// MultiLevelFieldReader reads from other field readers,
8// merging their results along the way in a specific order. You can specify
9// "levels" and name them in order to read only an exact level or up to
10// a specific level.
11//
12// This is useful for saying things such as "read the field from the state
13// and config and merge them" or "read the latest value of the field".
14type MultiLevelFieldReader struct {
15 Readers map[string]FieldReader
16 Levels []string
17}
18
19func (r *MultiLevelFieldReader) ReadField(address []string) (FieldReadResult, error) {
20 return r.ReadFieldMerge(address, r.Levels[len(r.Levels)-1])
21}
22
23func (r *MultiLevelFieldReader) ReadFieldExact(
24 address []string, level string) (FieldReadResult, error) {
25 reader, ok := r.Readers[level]
26 if !ok {
27 return FieldReadResult{}, fmt.Errorf(
28 "Unknown reader level: %s", level)
29 }
30
31 result, err := reader.ReadField(address)
32 if err != nil {
33 return FieldReadResult{}, fmt.Errorf(
34 "Error reading level %s: %s", level, err)
35 }
36
37 return result, nil
38}
39
40func (r *MultiLevelFieldReader) ReadFieldMerge(
41 address []string, level string) (FieldReadResult, error) {
42 var result FieldReadResult
43 for _, l := range r.Levels {
44 if r, ok := r.Readers[l]; ok {
45 out, err := r.ReadField(address)
46 if err != nil {
47 return FieldReadResult{}, fmt.Errorf(
48 "Error reading level %s: %s", l, err)
49 }
50
51 // TODO: computed
52 if out.Exists {
53 result = out
54 }
55 }
56
57 if l == level {
58 break
59 }
60 }
61
62 return result, nil
63}
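
A sketch of layering two readers as named levels; ReadFieldMerge returns the value from the newest level at which the field exists, while ReadFieldExact consults exactly one level.

package main

import (
	"fmt"

	"github.com/hashicorp/terraform/helper/schema"
)

func main() {
	s := map[string]*schema.Schema{"name": {Type: schema.TypeString}}
	level := func(m map[string]string) schema.FieldReader {
		return &schema.MapFieldReader{Schema: s, Map: schema.BasicMapReader(m)}
	}

	r := &schema.MultiLevelFieldReader{
		Levels: []string{"state", "config"},
		Readers: map[string]schema.FieldReader{
			"state":  level(map[string]string{"name": "old"}),
			"config": level(map[string]string{"name": "new"}),
		},
	}

	merged, _ := r.ReadFieldMerge([]string{"name"}, "config")
	exact, _ := r.ReadFieldExact([]string{"name"}, "state")
	fmt.Println(merged.Value, exact.Value) // new old
}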
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/field_writer.go b/vendor/github.com/hashicorp/terraform/helper/schema/field_writer.go
new file mode 100644
index 0000000..9abc41b
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/field_writer.go
@@ -0,0 +1,8 @@
1package schema
2
3// FieldWriters are responsible for writing fields by address into
4// a proper typed representation. ResourceData uses this to write new data
5// into existing sources.
6type FieldWriter interface {
7 WriteField([]string, interface{}) error
8}
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/field_writer_map.go b/vendor/github.com/hashicorp/terraform/helper/schema/field_writer_map.go
new file mode 100644
index 0000000..689ed8d
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/field_writer_map.go
@@ -0,0 +1,319 @@
1package schema
2
3import (
4 "fmt"
5 "reflect"
6 "strconv"
7 "strings"
8 "sync"
9
10 "github.com/mitchellh/mapstructure"
11)
12
13// MapFieldWriter writes data into a single map[string]string structure.
14type MapFieldWriter struct {
15 Schema map[string]*Schema
16
17 lock sync.Mutex
18 result map[string]string
19}
20
21// Map returns the underlying map that is being written to.
22func (w *MapFieldWriter) Map() map[string]string {
23 w.lock.Lock()
24 defer w.lock.Unlock()
25 if w.result == nil {
26 w.result = make(map[string]string)
27 }
28
29 return w.result
30}
31
32func (w *MapFieldWriter) unsafeWriteField(addr string, value string) {
33 w.lock.Lock()
34 defer w.lock.Unlock()
35 if w.result == nil {
36 w.result = make(map[string]string)
37 }
38
39 w.result[addr] = value
40}
41
42func (w *MapFieldWriter) WriteField(addr []string, value interface{}) error {
43 w.lock.Lock()
44 defer w.lock.Unlock()
45 if w.result == nil {
46 w.result = make(map[string]string)
47 }
48
49 schemaList := addrToSchema(addr, w.Schema)
50 if len(schemaList) == 0 {
51 return fmt.Errorf("Invalid address to set: %#v", addr)
52 }
53
54 // If we're setting anything other than a list root or set root,
55 // then disallow it.
56 for _, schema := range schemaList[:len(schemaList)-1] {
57 if schema.Type == TypeList {
58 return fmt.Errorf(
59 "%s: can only set full list",
60 strings.Join(addr, "."))
61 }
62
63 if schema.Type == TypeMap {
64 return fmt.Errorf(
65 "%s: can only set full map",
66 strings.Join(addr, "."))
67 }
68
69 if schema.Type == TypeSet {
70 return fmt.Errorf(
71 "%s: can only set full set",
72 strings.Join(addr, "."))
73 }
74 }
75
76 return w.set(addr, value)
77}
78
79func (w *MapFieldWriter) set(addr []string, value interface{}) error {
80 schemaList := addrToSchema(addr, w.Schema)
81 if len(schemaList) == 0 {
82 return fmt.Errorf("Invalid address to set: %#v", addr)
83 }
84
85 schema := schemaList[len(schemaList)-1]
86 switch schema.Type {
87 case TypeBool, TypeInt, TypeFloat, TypeString:
88 return w.setPrimitive(addr, value, schema)
89 case TypeList:
90 return w.setList(addr, value, schema)
91 case TypeMap:
92 return w.setMap(addr, value, schema)
93 case TypeSet:
94 return w.setSet(addr, value, schema)
95 case typeObject:
96 return w.setObject(addr, value, schema)
97 default:
98 panic(fmt.Sprintf("Unknown type: %#v", schema.Type))
99 }
100}
101
102func (w *MapFieldWriter) setList(
103 addr []string,
104 v interface{},
105 schema *Schema) error {
106 k := strings.Join(addr, ".")
107 setElement := func(idx string, value interface{}) error {
108 addrCopy := make([]string, len(addr), len(addr)+1)
109 copy(addrCopy, addr)
110 return w.set(append(addrCopy, idx), value)
111 }
112
113 var vs []interface{}
114 if err := mapstructure.Decode(v, &vs); err != nil {
115 return fmt.Errorf("%s: %s", k, err)
116 }
117
118 // Set the entire list.
119 var err error
120 for i, elem := range vs {
121 is := strconv.FormatInt(int64(i), 10)
122 err = setElement(is, elem)
123 if err != nil {
124 break
125 }
126 }
127 if err != nil {
128 for i := range vs {
129 is := strconv.FormatInt(int64(i), 10)
130 setElement(is, nil)
131 }
132
133 return err
134 }
135
136 w.result[k+".#"] = strconv.FormatInt(int64(len(vs)), 10)
137 return nil
138}
139
140func (w *MapFieldWriter) setMap(
141 addr []string,
142 value interface{},
143 schema *Schema) error {
144 k := strings.Join(addr, ".")
145 v := reflect.ValueOf(value)
146 vs := make(map[string]interface{})
147
148 if value == nil {
149 // The empty string here means the map is removed.
150 w.result[k] = ""
151 return nil
152 }
153
154 if v.Kind() != reflect.Map {
155 return fmt.Errorf("%s: must be a map", k)
156 }
157 if v.Type().Key().Kind() != reflect.String {
158 return fmt.Errorf("%s: keys must be strings", k)
159 }
160 for _, mk := range v.MapKeys() {
161 mv := v.MapIndex(mk)
162 vs[mk.String()] = mv.Interface()
163 }
164
165 // Remove the pure key since we're setting the full map value
166 delete(w.result, k)
167
168 // Set each subkey
169 addrCopy := make([]string, len(addr), len(addr)+1)
170 copy(addrCopy, addr)
171 for subKey, v := range vs {
172 if err := w.set(append(addrCopy, subKey), v); err != nil {
173 return err
174 }
175 }
176
177 // Set the count
178 w.result[k+".%"] = strconv.Itoa(len(vs))
179
180 return nil
181}
182
183func (w *MapFieldWriter) setObject(
184 addr []string,
185 value interface{},
186 schema *Schema) error {
187 // Set the entire object. First decode into a proper structure
188 var v map[string]interface{}
189 if err := mapstructure.Decode(value, &v); err != nil {
190 return fmt.Errorf("%s: %s", strings.Join(addr, "."), err)
191 }
192
193 // Make space for additional elements in the address
194 addrCopy := make([]string, len(addr), len(addr)+1)
195 copy(addrCopy, addr)
196
197 // Set each element in turn
198 var err error
199 for k1, v1 := range v {
200 if err = w.set(append(addrCopy, k1), v1); err != nil {
201 break
202 }
203 }
204 if err != nil {
205 for k1 := range v {
206 w.set(append(addrCopy, k1), nil)
207 }
208 }
209
210 return err
211}
212
213func (w *MapFieldWriter) setPrimitive(
214 addr []string,
215 v interface{},
216 schema *Schema) error {
217 k := strings.Join(addr, ".")
218
219 if v == nil {
220 // The empty string here means the value is removed.
221 w.result[k] = ""
222 return nil
223 }
224
225 var set string
226 switch schema.Type {
227 case TypeBool:
228 var b bool
229 if err := mapstructure.Decode(v, &b); err != nil {
230 return fmt.Errorf("%s: %s", k, err)
231 }
232
233 set = strconv.FormatBool(b)
234 case TypeString:
235 if err := mapstructure.Decode(v, &set); err != nil {
236 return fmt.Errorf("%s: %s", k, err)
237 }
238 case TypeInt:
239 var n int
240 if err := mapstructure.Decode(v, &n); err != nil {
241 return fmt.Errorf("%s: %s", k, err)
242 }
243 set = strconv.FormatInt(int64(n), 10)
244 case TypeFloat:
245 var n float64
246 if err := mapstructure.Decode(v, &n); err != nil {
247 return fmt.Errorf("%s: %s", k, err)
248 }
249 set = strconv.FormatFloat(float64(n), 'G', -1, 64)
250 default:
251 return fmt.Errorf("Unknown type: %#v", schema.Type)
252 }
253
254 w.result[k] = set
255 return nil
256}
257
258func (w *MapFieldWriter) setSet(
259 addr []string,
260 value interface{},
261 schema *Schema) error {
262 addrCopy := make([]string, len(addr), len(addr)+1)
263 copy(addrCopy, addr)
264 k := strings.Join(addr, ".")
265
266 if value == nil {
267 w.result[k+".#"] = "0"
268 return nil
269 }
270
271 // If it is a slice, then we have to turn it into a *Set so that
272 // we get the proper order back based on the hash code.
273 if v := reflect.ValueOf(value); v.Kind() == reflect.Slice {
274 // Build a temp schema and writer to use for the conversion
275 tempSchema := *schema
276 tempSchema.Type = TypeList
277 tempSchemaMap := map[string]*Schema{addr[0]: &tempSchema}
278 tempW := &MapFieldWriter{Schema: tempSchemaMap}
279
280 // Set the entire list, this lets us get sane values out of it
281 if err := tempW.WriteField(addr, value); err != nil {
282 return err
283 }
284
285 // Build the set by going over the list items in order and
286 // hashing them into the set. The reason we go over the list and
287 // not the `value` directly is because this forces all types
288 // to become []interface{} (generic) instead of []string, which
289 // most hash functions are expecting.
290 s := schema.ZeroValue().(*Set)
291 tempR := &MapFieldReader{
292 Map: BasicMapReader(tempW.Map()),
293 Schema: tempSchemaMap,
294 }
295 for i := 0; i < v.Len(); i++ {
296 is := strconv.FormatInt(int64(i), 10)
297 result, err := tempR.ReadField(append(addrCopy, is))
298 if err != nil {
299 return err
300 }
301 if !result.Exists {
302 panic("set item just set doesn't exist")
303 }
304
305 s.Add(result.Value)
306 }
307
308 value = s
309 }
310
311 for code, elem := range value.(*Set).m {
312 if err := w.set(append(addrCopy, code), elem); err != nil {
313 return err
314 }
315 }
316
317 w.result[k+".#"] = strconv.Itoa(value.(*Set).Len())
318 return nil
319}
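
A sketch of the flatmap encoding this writer produces: lists get a ".#" count plus indexed keys, and maps get a ".%" count plus one key per entry.

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/terraform/helper/schema"
)

func main() {
	w := &schema.MapFieldWriter{
		Schema: map[string]*schema.Schema{
			"ports": {Type: schema.TypeList, Elem: &schema.Schema{Type: schema.TypeInt}},
			"tags":  {Type: schema.TypeMap, Elem: &schema.Schema{Type: schema.TypeString}},
		},
	}

	if err := w.WriteField([]string{"ports"}, []interface{}{80, 443}); err != nil {
		log.Fatal(err)
	}
	if err := w.WriteField([]string{"tags"}, map[string]interface{}{"env": "prod"}); err != nil {
		log.Fatal(err)
	}

	// Keys (in some order): ports.#=2 ports.0=80 ports.1=443 tags.%=1 tags.env=prod
	fmt.Println(w.Map())
}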
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/getsource_string.go b/vendor/github.com/hashicorp/terraform/helper/schema/getsource_string.go
new file mode 100644
index 0000000..3a97629
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/getsource_string.go
@@ -0,0 +1,36 @@
1// Code generated by "stringer -type=getSource resource_data_get_source.go"; DO NOT EDIT.
2
3package schema
4
5import "fmt"
6
7const (
8 _getSource_name_0 = "getSourceStategetSourceConfig"
9 _getSource_name_1 = "getSourceDiff"
10 _getSource_name_2 = "getSourceSet"
11 _getSource_name_3 = "getSourceLevelMaskgetSourceExact"
12)
13
14var (
15 _getSource_index_0 = [...]uint8{0, 14, 29}
16 _getSource_index_1 = [...]uint8{0, 13}
17 _getSource_index_2 = [...]uint8{0, 12}
18 _getSource_index_3 = [...]uint8{0, 18, 32}
19)
20
21func (i getSource) String() string {
22 switch {
23 case 1 <= i && i <= 2:
24 i -= 1
25 return _getSource_name_0[_getSource_index_0[i]:_getSource_index_0[i+1]]
26 case i == 4:
27 return _getSource_name_1
28 case i == 8:
29 return _getSource_name_2
30 case 15 <= i && i <= 16:
31 i -= 15
32 return _getSource_name_3[_getSource_index_3[i]:_getSource_index_3[i+1]]
33 default:
34 return fmt.Sprintf("getSource(%d)", i)
35 }
36}
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/provider.go b/vendor/github.com/hashicorp/terraform/helper/schema/provider.go
new file mode 100644
index 0000000..d52d2f5
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/provider.go
@@ -0,0 +1,400 @@
1package schema
2
3import (
4 "context"
5 "errors"
6 "fmt"
7 "sort"
8 "sync"
9
10 "github.com/hashicorp/go-multierror"
11 "github.com/hashicorp/terraform/terraform"
12)
13
14// Provider represents a resource provider in Terraform, and properly
15// implements all of the ResourceProvider API.
16//
17// By defining a schema for the configuration of the provider, the
18// map of supporting resources, and a configuration function, the schema
19// framework takes over and handles all the provider operations for you.
20//
21// After defining the provider structure, it is unlikely that you'll require any
22// of the methods on Provider itself.
23type Provider struct {
24 // Schema is the schema for the configuration of this provider. If this
25 // provider has no configuration, this can be omitted.
26 //
27 // The keys of this map are the configuration keys, and the value is
28 // the schema describing the value of the configuration.
29 Schema map[string]*Schema
30
31 // ResourcesMap is the map of available resources that this provider
32 // can manage, along with their Resource structure defining their
33 // own schemas and CRUD operations.
34 //
35 // Provider automatically handles routing operations such as Apply,
36 // Diff, etc. to the proper resource.
37 ResourcesMap map[string]*Resource
38
39 // DataSourcesMap is the collection of available data sources that
40 // this provider implements, with a Resource instance defining
41 // the schema and Read operation of each.
42 //
43 // Resource instances for data sources must have a Read function
44 // and must *not* implement Create, Update or Delete.
45 DataSourcesMap map[string]*Resource
46
47 // ConfigureFunc is a function for configuring the provider. If the
48 // provider doesn't need to be configured, this can be omitted.
49 //
50 // See the ConfigureFunc documentation for more information.
51 ConfigureFunc ConfigureFunc
52
53 // MetaReset is called by TestReset to reset any state stored in the meta
54 // interface. This is especially important if the StopContext is stored by
55 // the provider.
56 MetaReset func() error
57
58 meta interface{}
59
60 // a mutex is required because TestReset can directly replace the stopCtx
61 stopMu sync.Mutex
62 stopCtx context.Context
63 stopCtxCancel context.CancelFunc
64 stopOnce sync.Once
65}
66
67// ConfigureFunc is the function used to configure a Provider.
68//
69// The interface{} value returned by this function is stored and passed into
70// the subsequent resources as the meta parameter. This return value is
71// usually used to pass along a configured API client, a configuration
72// structure, etc.
73type ConfigureFunc func(*ResourceData) (interface{}, error)
74
75// InternalValidate should be called to validate the structure
76// of the provider.
77//
78// This should be called in a unit test for any provider to verify
79// before release that a provider is properly configured for use with
80// this library.
81func (p *Provider) InternalValidate() error {
82 if p == nil {
83 return errors.New("provider is nil")
84 }
85
86 var validationErrors error
87 sm := schemaMap(p.Schema)
88 if err := sm.InternalValidate(sm); err != nil {
89 validationErrors = multierror.Append(validationErrors, err)
90 }
91
92 for k, r := range p.ResourcesMap {
93 if err := r.InternalValidate(nil, true); err != nil {
94 validationErrors = multierror.Append(validationErrors, fmt.Errorf("resource %s: %s", k, err))
95 }
96 }
97
98 for k, r := range p.DataSourcesMap {
99 if err := r.InternalValidate(nil, false); err != nil {
100 validationErrors = multierror.Append(validationErrors, fmt.Errorf("data source %s: %s", k, err))
101 }
102 }
103
104 return validationErrors
105}
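
The customary way to exercise InternalValidate is a one-line unit test in the provider package; Provider below is a stand-in constructor for illustration.

package myprovider

import (
	"testing"

	"github.com/hashicorp/terraform/helper/schema"
)

// Provider is a stand-in constructor; a real provider package would
// return its fully populated *schema.Provider here.
func Provider() *schema.Provider {
	return &schema.Provider{}
}

func TestProvider_internalValidate(t *testing.T) {
	if err := Provider().InternalValidate(); err != nil {
		t.Fatalf("internal validation failed: %s", err)
	}
}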
106
107// Meta returns the metadata associated with this provider that was
108// returned by the Configure call. It will be nil until Configure is called.
109func (p *Provider) Meta() interface{} {
110 return p.meta
111}
112
113// SetMeta can be used to forcefully set the Meta object of the provider.
114// Note that if Configure is called the return value will override anything
115// set here.
116func (p *Provider) SetMeta(v interface{}) {
117 p.meta = v
118}
119
120// Stopped reports whether the provider has been stopped or not.
121func (p *Provider) Stopped() bool {
122 ctx := p.StopContext()
123 select {
124 case <-ctx.Done():
125 return true
126 default:
127 return false
128 }
129}
130
131// StopContext returns a context that is canceled once the provider is stopped.
132func (p *Provider) StopContext() context.Context {
133 p.stopOnce.Do(p.stopInit)
134
135 p.stopMu.Lock()
136 defer p.stopMu.Unlock()
137
138 return p.stopCtx
139}
140
141func (p *Provider) stopInit() {
142 p.stopMu.Lock()
143 defer p.stopMu.Unlock()
144
145 p.stopCtx, p.stopCtxCancel = context.WithCancel(context.Background())
146}
147
148// Stop implementation of terraform.ResourceProvider interface.
149func (p *Provider) Stop() error {
150 p.stopOnce.Do(p.stopInit)
151
152 p.stopMu.Lock()
153 defer p.stopMu.Unlock()
154
155 p.stopCtxCancel()
156 return nil
157}
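
A sketch of long-running resource code honoring a stop request via StopContext; the timeout stands in for a real readiness poll and is illustrative only.

package main

import (
	"fmt"
	"time"

	"github.com/hashicorp/terraform/helper/schema"
)

// waitUntilReady is a hypothetical long-running step that aborts promptly
// once Stop cancels the provider's stop context.
func waitUntilReady(p *schema.Provider) error {
	select {
	case <-p.StopContext().Done():
		return fmt.Errorf("aborted: provider was stopped")
	case <-time.After(2 * time.Second): // stand-in for a real readiness poll
		return nil
	}
}

func main() {
	p := &schema.Provider{}
	go p.Stop() // simulate an interrupt arriving mid-operation
	fmt.Println(waitUntilReady(p))
}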
158
159// TestReset resets any state stored in the Provider, and will call TestReset
160// on Meta if it implements the TestProvider interface.
161// This may be used to reset the schema.Provider at the start of a test, and is
162// automatically called by resource.Test.
163func (p *Provider) TestReset() error {
164 p.stopInit()
165 if p.MetaReset != nil {
166 return p.MetaReset()
167 }
168 return nil
169}
170
171// Input implementation of terraform.ResourceProvider interface.
172func (p *Provider) Input(
173 input terraform.UIInput,
174 c *terraform.ResourceConfig) (*terraform.ResourceConfig, error) {
175 return schemaMap(p.Schema).Input(input, c)
176}
177
178// Validate implementation of terraform.ResourceProvider interface.
179func (p *Provider) Validate(c *terraform.ResourceConfig) ([]string, []error) {
180 if err := p.InternalValidate(); err != nil {
181 return nil, []error{fmt.Errorf(
182 "Internal validation of the provider failed! This is always a bug\n"+
183 "with the provider itself, and not a user issue. Please report\n"+
184 "this bug:\n\n%s", err)}
185 }
186
187 return schemaMap(p.Schema).Validate(c)
188}
189
190// ValidateResource implementation of terraform.ResourceProvider interface.
191func (p *Provider) ValidateResource(
192 t string, c *terraform.ResourceConfig) ([]string, []error) {
193 r, ok := p.ResourcesMap[t]
194 if !ok {
195 return nil, []error{fmt.Errorf(
196 "Provider doesn't support resource: %s", t)}
197 }
198
199 return r.Validate(c)
200}
201
202// Configure implementation of terraform.ResourceProvider interface.
203func (p *Provider) Configure(c *terraform.ResourceConfig) error {
204 // No configuration
205 if p.ConfigureFunc == nil {
206 return nil
207 }
208
209 sm := schemaMap(p.Schema)
210
211 // Get a ResourceData for this configuration. To do this, we actually
212 // generate an intermediary "diff" although that is never exposed.
213 diff, err := sm.Diff(nil, c)
214 if err != nil {
215 return err
216 }
217
218 data, err := sm.Data(nil, diff)
219 if err != nil {
220 return err
221 }
222
223 meta, err := p.ConfigureFunc(data)
224 if err != nil {
225 return err
226 }
227
228 p.meta = meta
229 return nil
230}
231
232// Apply implementation of terraform.ResourceProvider interface.
233func (p *Provider) Apply(
234 info *terraform.InstanceInfo,
235 s *terraform.InstanceState,
236 d *terraform.InstanceDiff) (*terraform.InstanceState, error) {
237 r, ok := p.ResourcesMap[info.Type]
238 if !ok {
239 return nil, fmt.Errorf("unknown resource type: %s", info.Type)
240 }
241
242 return r.Apply(s, d, p.meta)
243}
244
245// Diff implementation of terraform.ResourceProvider interface.
246func (p *Provider) Diff(
247 info *terraform.InstanceInfo,
248 s *terraform.InstanceState,
249 c *terraform.ResourceConfig) (*terraform.InstanceDiff, error) {
250 r, ok := p.ResourcesMap[info.Type]
251 if !ok {
252 return nil, fmt.Errorf("unknown resource type: %s", info.Type)
253 }
254
255 return r.Diff(s, c)
256}
257
258// Refresh implementation of terraform.ResourceProvider interface.
259func (p *Provider) Refresh(
260 info *terraform.InstanceInfo,
261 s *terraform.InstanceState) (*terraform.InstanceState, error) {
262 r, ok := p.ResourcesMap[info.Type]
263 if !ok {
264 return nil, fmt.Errorf("unknown resource type: %s", info.Type)
265 }
266
267 return r.Refresh(s, p.meta)
268}
269
270// Resources implementation of terraform.ResourceProvider interface.
271func (p *Provider) Resources() []terraform.ResourceType {
272 keys := make([]string, 0, len(p.ResourcesMap))
273 for k := range p.ResourcesMap {
274 keys = append(keys, k)
275 }
276 sort.Strings(keys)
277
278 result := make([]terraform.ResourceType, 0, len(keys))
279 for _, k := range keys {
280 resource := p.ResourcesMap[k]
281
282 // This isn't really possible (it'd fail InternalValidate), but
283 // we do it anyway to avoid a panic.
284 if resource == nil {
285 resource = &Resource{}
286 }
287
288 result = append(result, terraform.ResourceType{
289 Name: k,
290 Importable: resource.Importer != nil,
291 })
292 }
293
294 return result
295}
296
297func (p *Provider) ImportState(
298 info *terraform.InstanceInfo,
299 id string) ([]*terraform.InstanceState, error) {
300 // Find the resource
301 r, ok := p.ResourcesMap[info.Type]
302 if !ok {
303 return nil, fmt.Errorf("unknown resource type: %s", info.Type)
304 }
305
306 // If it doesn't support import, error
307 if r.Importer == nil {
308 return nil, fmt.Errorf("resource %s doesn't support import", info.Type)
309 }
310
311 // Create the data
312 data := r.Data(nil)
313 data.SetId(id)
314 data.SetType(info.Type)
315
316 // Call the import function
317 results := []*ResourceData{data}
318 if r.Importer.State != nil {
319 var err error
320 results, err = r.Importer.State(data, p.meta)
321 if err != nil {
322 return nil, err
323 }
324 }
325
326 // Convert the results to InstanceState values and return it
327 states := make([]*terraform.InstanceState, len(results))
328 for i, r := range results {
329 states[i] = r.State()
330 }
331
332 // Verify that all are non-nil. If any are nil, the error
333 // isn't obvious, so we circumvent that with a friendlier error.
334 for _, s := range states {
335 if s == nil {
336 return nil, fmt.Errorf(
337 "nil entry in ImportState results. This is always a bug with\n" +
338 "the resource that is being imported. Please report this as\n" +
339 "a bug to Terraform.")
340 }
341 }
342
343 return states, nil
344}
345
346// ValidateDataSource implementation of terraform.ResourceProvider interface.
347func (p *Provider) ValidateDataSource(
348 t string, c *terraform.ResourceConfig) ([]string, []error) {
349 r, ok := p.DataSourcesMap[t]
350 if !ok {
351 return nil, []error{fmt.Errorf(
352 "Provider doesn't support data source: %s", t)}
353 }
354
355 return r.Validate(c)
356}
357
358// ReadDataDiff implementation of terraform.ResourceProvider interface.
359func (p *Provider) ReadDataDiff(
360 info *terraform.InstanceInfo,
361 c *terraform.ResourceConfig) (*terraform.InstanceDiff, error) {
362
363 r, ok := p.DataSourcesMap[info.Type]
364 if !ok {
365 return nil, fmt.Errorf("unknown data source: %s", info.Type)
366 }
367
368 return r.Diff(nil, c)
369}
370
371// ReadDataApply implementation of terraform.ResourceProvider interface.
372func (p *Provider) ReadDataApply(
373 info *terraform.InstanceInfo,
374 d *terraform.InstanceDiff) (*terraform.InstanceState, error) {
375
376 r, ok := p.DataSourcesMap[info.Type]
377 if !ok {
378 return nil, fmt.Errorf("unknown data source: %s", info.Type)
379 }
380
381 return r.ReadDataApply(d, p.meta)
382}
383
384// DataSources implementation of terraform.ResourceProvider interface.
385func (p *Provider) DataSources() []terraform.DataSource {
386 keys := make([]string, 0, len(p.DataSourcesMap))
387 for k := range p.DataSourcesMap {
388 keys = append(keys, k)
389 }
390 sort.Strings(keys)
391
392 result := make([]terraform.DataSource, 0, len(keys))
393 for _, k := range keys {
394 result = append(result, terraform.DataSource{
395 Name: k,
396 })
397 }
398
399 return result
400}
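
Tying the pieces together, a hedged minimal provider definition; the "endpoint" key and example_thing resource are illustrative only, and ForceNew is set because the sketch defines no Update function.

package main

import (
	"github.com/hashicorp/terraform/helper/schema"
)

// newProvider is a minimal, illustrative provider, not a real one.
func newProvider() *schema.Provider {
	return &schema.Provider{
		Schema: map[string]*schema.Schema{
			"endpoint": {Type: schema.TypeString, Optional: true},
		},
		ResourcesMap: map[string]*schema.Resource{
			"example_thing": exampleThing(),
		},
		ConfigureFunc: func(d *schema.ResourceData) (interface{}, error) {
			// The value returned here is passed as the meta argument
			// to every CRUD function of every resource.
			return d.Get("endpoint").(string), nil
		},
	}
}

func exampleThing() *schema.Resource {
	return &schema.Resource{
		Schema: map[string]*schema.Schema{
			// ForceNew because this sketch defines no Update function.
			"name": {Type: schema.TypeString, Required: true, ForceNew: true},
		},
		Create: func(d *schema.ResourceData, meta interface{}) error {
			d.SetId(d.Get("name").(string))
			return nil
		},
		Read:   func(d *schema.ResourceData, meta interface{}) error { return nil },
		Delete: func(d *schema.ResourceData, meta interface{}) error { return nil },
	}
}

func main() {
	if err := newProvider().InternalValidate(); err != nil {
		panic(err)
	}
}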
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/provisioner.go b/vendor/github.com/hashicorp/terraform/helper/schema/provisioner.go
new file mode 100644
index 0000000..c1564a2
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/provisioner.go
@@ -0,0 +1,180 @@
1package schema
2
3import (
4 "context"
5 "errors"
6 "fmt"
7 "sync"
8
9 "github.com/hashicorp/go-multierror"
10 "github.com/hashicorp/terraform/config"
11 "github.com/hashicorp/terraform/terraform"
12)
13
14// Provisioner represents a resource provisioner in Terraform and properly
15// implements all of the ResourceProvisioner API.
16//
17// This higher level structure makes it much easier to implement a new or
18// custom provisioner for Terraform.
19//
20// The function callbacks for this structure are all passed a context object.
21// This context object has a number of pre-defined values that can be accessed
22// via the global functions defined in context.go.
23type Provisioner struct {
24 // ConnSchema is the schema for the connection settings for this
25 // provisioner.
26 //
27 // The keys of this map are the configuration keys, and the value is
28 // the schema describing the value of the configuration.
29 //
30 // NOTE: The value of connection keys can only be strings for now.
31 ConnSchema map[string]*Schema
32
33 // Schema is the schema for the usage of this provisioner.
34 //
35 // The keys of this map are the configuration keys, and the value is
36 // the schema describing the value of the configuration.
37 Schema map[string]*Schema
38
39 // ApplyFunc is the function for executing the provisioner. This is required.
40 // It is given a context. See the Provisioner struct docs for more
41 // information.
42 ApplyFunc func(ctx context.Context) error
43
44 stopCtx context.Context
45 stopCtxCancel context.CancelFunc
46 stopOnce sync.Once
47}
48
49// Keys that can be used to access data in the context parameters for
50// Provisioners.
51var (
52 connDataInvalid = contextKey("data invalid")
53
54 // This returns a *ResourceData for the connection information.
55 // Guaranteed to never be nil.
56 ProvConnDataKey = contextKey("provider conn data")
57
58 // This returns a *ResourceData for the config information.
59 // Guaranteed to never be nil.
60 ProvConfigDataKey = contextKey("provider config data")
61
62 // This returns a terraform.UIOutput. Guaranteed to never be nil.
63 ProvOutputKey = contextKey("provider output")
64
65 // This returns the raw InstanceState passed to Apply. Guaranteed to
66 // be set, but may be nil.
67 ProvRawStateKey = contextKey("provider raw state")
68)
69
70// InternalValidate should be called to validate the structure
71// of the provisioner.
72//
73// This should be called in a unit test to verify before release that this
74// structure is properly configured for use.
75func (p *Provisioner) InternalValidate() error {
76 if p == nil {
77 return errors.New("provisioner is nil")
78 }
79
80 var validationErrors error
81 {
82 sm := schemaMap(p.ConnSchema)
83 if err := sm.InternalValidate(sm); err != nil {
84 validationErrors = multierror.Append(validationErrors, err)
85 }
86 }
87
88 {
89 sm := schemaMap(p.Schema)
90 if err := sm.InternalValidate(sm); err != nil {
91 validationErrors = multierror.Append(validationErrors, err)
92 }
93 }
94
95 if p.ApplyFunc == nil {
96 validationErrors = multierror.Append(validationErrors, fmt.Errorf(
97 "ApplyFunc must not be nil"))
98 }
99
100 return validationErrors
101}
102
103// StopContext returns a context that is canceled when the provisioner is stopped.
104func (p *Provisioner) StopContext() context.Context {
105 p.stopOnce.Do(p.stopInit)
106 return p.stopCtx
107}
108
109func (p *Provisioner) stopInit() {
110 p.stopCtx, p.stopCtxCancel = context.WithCancel(context.Background())
111}
112
113// Stop implementation of terraform.ResourceProvisioner interface.
114func (p *Provisioner) Stop() error {
115 p.stopOnce.Do(p.stopInit)
116 p.stopCtxCancel()
117 return nil
118}
119
120func (p *Provisioner) Validate(c *terraform.ResourceConfig) ([]string, []error) {
121 return schemaMap(p.Schema).Validate(c)
122}
123
124// Apply implementation of terraform.ResourceProvisioner interface.
125func (p *Provisioner) Apply(
126 o terraform.UIOutput,
127 s *terraform.InstanceState,
128 c *terraform.ResourceConfig) error {
129 var connData, configData *ResourceData
130
131 {
132 // We first need to turn the connection information into a
133 // terraform.ResourceConfig so that we can use that type to more
134 // easily build a ResourceData structure. We do this by simply treating
135 // the conn info as configuration input.
136 raw := make(map[string]interface{})
137 if s != nil {
138 for k, v := range s.Ephemeral.ConnInfo {
139 raw[k] = v
140 }
141 }
142
143 c, err := config.NewRawConfig(raw)
144 if err != nil {
145 return err
146 }
147
148 sm := schemaMap(p.ConnSchema)
149 diff, err := sm.Diff(nil, terraform.NewResourceConfig(c))
150 if err != nil {
151 return err
152 }
153 connData, err = sm.Data(nil, diff)
154 if err != nil {
155 return err
156 }
157 }
158
159 {
160 // Build the configuration data. Doing this requires making a "diff"
161 // even though that's never used. We use that just to get the correct types.
162 configMap := schemaMap(p.Schema)
163 diff, err := configMap.Diff(nil, c)
164 if err != nil {
165 return err
166 }
167 configData, err = configMap.Data(nil, diff)
168 if err != nil {
169 return err
170 }
171 }
172
173 // Build the context and call the function
174 ctx := p.StopContext()
175 ctx = context.WithValue(ctx, ProvConnDataKey, connData)
176 ctx = context.WithValue(ctx, ProvConfigDataKey, configData)
177 ctx = context.WithValue(ctx, ProvOutputKey, o)
178 ctx = context.WithValue(ctx, ProvRawStateKey, s)
179 return p.ApplyFunc(ctx)
180}
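
A minimal sketch of a provisioner that echoes a configured message, pulling its inputs from the context keys documented above.

package main

import (
	"context"

	"github.com/hashicorp/terraform/helper/schema"
	"github.com/hashicorp/terraform/terraform"
)

// newProvisioner is illustrative: it reads the config ResourceData and the
// UIOutput out of the context and echoes the configured message.
func newProvisioner() *schema.Provisioner {
	return &schema.Provisioner{
		Schema: map[string]*schema.Schema{
			"message": {Type: schema.TypeString, Required: true},
		},
		ApplyFunc: func(ctx context.Context) error {
			data := ctx.Value(schema.ProvConfigDataKey).(*schema.ResourceData)
			out := ctx.Value(schema.ProvOutputKey).(terraform.UIOutput)
			out.Output(data.Get("message").(string))
			return nil
		},
	}
}

func main() {
	if err := newProvisioner().InternalValidate(); err != nil {
		panic(err)
	}
}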
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/resource.go b/vendor/github.com/hashicorp/terraform/helper/schema/resource.go
new file mode 100644
index 0000000..c810558
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/resource.go
@@ -0,0 +1,478 @@
1package schema
2
3import (
4 "errors"
5 "fmt"
6 "log"
7 "strconv"
8
9 "github.com/hashicorp/terraform/terraform"
10)
11
12// Resource represents a thing in Terraform that has a set of configurable
13// attributes and a lifecycle (create, read, update, delete).
14//
15// The Resource schema is an abstraction that allows provider writers to
16// worry only about CRUD operations while off-loading validation, diff
17// generation, etc. to this higher level library.
18//
19// In spite of the name, this struct is not used only for terraform resources,
20// but also for data sources. In the case of data sources, the Create,
21// Update and Delete functions must not be provided.
22type Resource struct {
23 // Schema is the schema for the configuration of this resource.
24 //
25 // The keys of this map are the configuration keys, and the values
26 // describe the schema of the configuration value.
27 //
28 // The schema is used to represent both configurable data as well
29 // as data that might be computed in the process of creating this
30 // resource.
31 Schema map[string]*Schema
32
33 // SchemaVersion is the version number for this resource's Schema
34 // definition. The current SchemaVersion is stored in the state for
35 // each resource. Provider authors can increment this version number
36 // when Schema semantics change. If the State's SchemaVersion is less than
37 // the current SchemaVersion, the InstanceState is yielded to the
38 // MigrateState callback, where the provider can make whatever changes it
39 // needs to update the state to be compatible to the latest version of the
40 // Schema.
41 //
42 // When unset, SchemaVersion defaults to 0, so provider authors can start
43 // their versioning at any integer >= 1.
44 SchemaVersion int
45
46 // MigrateState is responsible for updating an InstanceState with an old
47 // version to the format expected by the current version of the Schema.
48 //
49 // It is called during Refresh if the State's stored SchemaVersion is less
50 // than the current SchemaVersion of the Resource.
51 //
52 // The function is yielded the state's stored SchemaVersion and a pointer to
53 // the InstanceState that needs updating, as well as the configured
54 // provider's configured meta interface{}, in case the migration process
55 // needs to make any remote API calls.
56 MigrateState StateMigrateFunc
57
58 // The functions below are the CRUD operations for this resource.
59 //
60 // The only optional operation is Update. If Update is not implemented,
61 // then updates will not be supported for this resource.
62 //
63 // The ResourceData parameter in the functions below are used to
64 // query configuration and changes for the resource as well as to set
65 // the ID, computed data, etc.
66 //
67 // The interface{} parameter is the result of the ConfigureFunc in
68 // the provider for this resource. If the provider does not define
69 // a ConfigureFunc, this will be nil. This parameter should be used
70 // to store API clients, configuration structures, etc.
71 //
72 // If any errors occur during any of the operations, an error should be
73 // returned. If a resource was partially updated, be careful to enable
74 // partial state mode for ResourceData and use it accordingly.
75 //
76 // Exists is a function that is called to check if a resource still
77 // exists. If this returns false, then this will affect the diff
78 // accordingly. If this function isn't set, it will not be called. It
79 // is highly recommended to set it. The *ResourceData passed to Exists
80 // should _not_ be modified.
81 Create CreateFunc
82 Read ReadFunc
83 Update UpdateFunc
84 Delete DeleteFunc
85 Exists ExistsFunc
86
87 // Importer is the ResourceImporter implementation for this resource.
88 // If this is nil, then this resource does not support importing. If
89 // this is non-nil, then it supports importing and ResourceImporter
90 // must be validated. The validity of ResourceImporter is verified
91 // by InternalValidate on Resource.
92 Importer *ResourceImporter
93
94 // If non-empty, this string is emitted as a warning during Validate.
95 // This is a private interface for now, for use by DataSourceResourceShim,
96 // and not for general use. (But maybe later...)
97 deprecationMessage string
98
99 // Timeouts allow users to specify specific time durations in which an
100 // operation should time out, to allow them to extend an action to suit their
101 // usage. For example, a user may specify a large Creation timeout for their
102 // AWS RDS Instance due to its size, or when restoring from a snapshot.
103 // Resource implementors must enable Timeout support by adding the allowed
104 // actions (Create, Read, Update, Delete, Default) to the Resource struct, and
105 // accessing them in the matching methods.
106 Timeouts *ResourceTimeout
107}
108
109// See Resource documentation.
110type CreateFunc func(*ResourceData, interface{}) error
111
112// See Resource documentation.
113type ReadFunc func(*ResourceData, interface{}) error
114
115// See Resource documentation.
116type UpdateFunc func(*ResourceData, interface{}) error
117
118// See Resource documentation.
119type DeleteFunc func(*ResourceData, interface{}) error
120
121// See Resource documentation.
122type ExistsFunc func(*ResourceData, interface{}) (bool, error)
123
124// See Resource documentation.
125type StateMigrateFunc func(
126 int, *terraform.InstanceState, interface{}) (*terraform.InstanceState, error)
127
128// Apply creates, updates, and/or deletes a resource.
129func (r *Resource) Apply(
130 s *terraform.InstanceState,
131 d *terraform.InstanceDiff,
132 meta interface{}) (*terraform.InstanceState, error) {
133 data, err := schemaMap(r.Schema).Data(s, d)
134 if err != nil {
135 return s, err
136 }
137
138 // The instance diff should have the timeout info; copy it over to the
139 // ResourceData meta.
140 rt := ResourceTimeout{}
141 if _, ok := d.Meta[TimeoutKey]; ok {
142 if err := rt.DiffDecode(d); err != nil {
143 log.Printf("[ERR] Error decoding ResourceTimeout: %s", err)
144 }
145 } else {
146		log.Printf("[DEBUG] No meta timeout key found in Apply()")
147 }
148 data.timeouts = &rt
149
150 if s == nil {
151 // The Terraform API dictates that this should never happen, but
152 // it doesn't hurt to be safe in this case.
153 s = new(terraform.InstanceState)
154 }
155
156 if d.Destroy || d.RequiresNew() {
157 if s.ID != "" {
158 // Destroy the resource since it is created
159 if err := r.Delete(data, meta); err != nil {
160 return r.recordCurrentSchemaVersion(data.State()), err
161 }
162
163 // Make sure the ID is gone.
164 data.SetId("")
165 }
166
167 // If we're only destroying, and not creating, then return
168 // now since we're done!
169 if !d.RequiresNew() {
170 return nil, nil
171 }
172
173 // Reset the data to be stateless since we just destroyed
174		data, err = schemaMap(r.Schema).Data(nil, d)
175		if err != nil {
176			return nil, err
177		}
178		// data was reset, so re-apply the parsed timeouts
179		data.timeouts = &rt
180 }
181
182 err = nil
183 if data.Id() == "" {
184 // We're creating, it is a new resource.
185 data.MarkNewResource()
186 err = r.Create(data, meta)
187 } else {
188 if r.Update == nil {
189 return s, fmt.Errorf("doesn't support update")
190 }
191
192 err = r.Update(data, meta)
193 }
194
195 return r.recordCurrentSchemaVersion(data.State()), err
196}
197
198// Diff returns a diff of this resource and is API compatible with the
199// ResourceProvider interface.
200func (r *Resource) Diff(
201 s *terraform.InstanceState,
202 c *terraform.ResourceConfig) (*terraform.InstanceDiff, error) {
203
204 t := &ResourceTimeout{}
205 err := t.ConfigDecode(r, c)
206
207 if err != nil {
208 return nil, fmt.Errorf("[ERR] Error decoding timeout: %s", err)
209 }
210
211 instanceDiff, err := schemaMap(r.Schema).Diff(s, c)
212 if err != nil {
213 return instanceDiff, err
214 }
215
216 if instanceDiff != nil {
217 if err := t.DiffEncode(instanceDiff); err != nil {
218 log.Printf("[ERR] Error encoding timeout to instance diff: %s", err)
219 }
220 } else {
221 log.Printf("[DEBUG] Instance Diff is nil in Diff()")
222 }
223
224 return instanceDiff, err
225}
226
227// Validate validates the resource configuration against the schema.
228func (r *Resource) Validate(c *terraform.ResourceConfig) ([]string, []error) {
229 warns, errs := schemaMap(r.Schema).Validate(c)
230
231 if r.deprecationMessage != "" {
232 warns = append(warns, r.deprecationMessage)
233 }
234
235 return warns, errs
236}
237
238// ReadDataApply loads the data for a data source, given a diff that
239// describes the configuration arguments and desired computed attributes.
240func (r *Resource) ReadDataApply(
241 d *terraform.InstanceDiff,
242 meta interface{},
243) (*terraform.InstanceState, error) {
244
245 // Data sources are always built completely from scratch
246 // on each read, so the source state is always nil.
247 data, err := schemaMap(r.Schema).Data(nil, d)
248 if err != nil {
249 return nil, err
250 }
251
252 err = r.Read(data, meta)
253 state := data.State()
254 if state != nil && state.ID == "" {
255 // Data sources can set an ID if they want, but they aren't
256 // required to; we'll provide a placeholder if they don't,
257 // to preserve the invariant that all resources have non-empty
258 // ids.
259 state.ID = "-"
260 }
261
262 return r.recordCurrentSchemaVersion(state), err
263}
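
// A sketch of a data source built on Resource, consumed by ReadDataApply
// above: only Read is implemented, and it both stores the computed result
// and sets an ID. The attribute names and the lookup are hypothetical.
func exampleDataSource() *Resource {
	return &Resource{
		Schema: map[string]*Schema{
			"name":  {Type: TypeString, Required: true},
			"value": {Type: TypeString, Computed: true},
		},
		Read: func(d *ResourceData, meta interface{}) error {
			name := d.Get("name").(string)
			if err := d.Set("value", "looked-up-"+name); err != nil {
				return err
			}
			d.SetId(name)
			return nil
		},
	}
}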
264
265// Refresh refreshes the state of the resource.
266func (r *Resource) Refresh(
267 s *terraform.InstanceState,
268 meta interface{}) (*terraform.InstanceState, error) {
269 // If the ID is already somehow blank, it doesn't exist
270 if s.ID == "" {
271 return nil, nil
272 }
273
274 rt := ResourceTimeout{}
275 if _, ok := s.Meta[TimeoutKey]; ok {
276 if err := rt.StateDecode(s); err != nil {
277 log.Printf("[ERR] Error decoding ResourceTimeout: %s", err)
278 }
279 }
280
281 if r.Exists != nil {
282 // Make a copy of data so that if it is modified it doesn't
283 // affect our Read later.
284		data, err := schemaMap(r.Schema).Data(s, nil)
285		if err != nil {
286			return s, err
287		}
288		data.timeouts = &rt
290
291 exists, err := r.Exists(data, meta)
292 if err != nil {
293 return s, err
294 }
295 if !exists {
296 return nil, nil
297 }
298 }
299
300 needsMigration, stateSchemaVersion := r.checkSchemaVersion(s)
301 if needsMigration && r.MigrateState != nil {
302 s, err := r.MigrateState(stateSchemaVersion, s, meta)
303 if err != nil {
304 return s, err
305 }
306 }
307
308	data, err := schemaMap(r.Schema).Data(s, nil)
309	if err != nil {
310		return s, err
311	}
312	data.timeouts = &rt
313
314 err = r.Read(data, meta)
315 state := data.State()
316 if state != nil && state.ID == "" {
317 state = nil
318 }
319
320 return r.recordCurrentSchemaVersion(state), err
321}
322
323// InternalValidate should be called to validate the structure
324// of the resource.
325//
326// This should be called in a unit test for any resource to verify
327// before release that a resource is properly configured for use with
328// this library.
329//
330// Provider.InternalValidate() will automatically call this for all of
331// the resources it manages, so you don't need to call this manually if it
332// is part of a Provider.
333func (r *Resource) InternalValidate(topSchemaMap schemaMap, writable bool) error {
334 if r == nil {
335 return errors.New("resource is nil")
336 }
337
338 if !writable {
339 if r.Create != nil || r.Update != nil || r.Delete != nil {
340 return fmt.Errorf("must not implement Create, Update or Delete")
341 }
342 }
343
344 tsm := topSchemaMap
345
346 if r.isTopLevel() && writable {
347 // All non-Computed attributes must be ForceNew if Update is not defined
348 if r.Update == nil {
349 nonForceNewAttrs := make([]string, 0)
350 for k, v := range r.Schema {
351 if !v.ForceNew && !v.Computed {
352 nonForceNewAttrs = append(nonForceNewAttrs, k)
353 }
354 }
355 if len(nonForceNewAttrs) > 0 {
356 return fmt.Errorf(
357 "No Update defined, must set ForceNew on: %#v", nonForceNewAttrs)
358 }
359 } else {
360 nonUpdateableAttrs := make([]string, 0)
361 for k, v := range r.Schema {
362				if v.ForceNew || (v.Computed && !v.Optional) {
363 nonUpdateableAttrs = append(nonUpdateableAttrs, k)
364 }
365 }
366 updateableAttrs := len(r.Schema) - len(nonUpdateableAttrs)
367 if updateableAttrs == 0 {
368 return fmt.Errorf(
369 "All fields are ForceNew or Computed w/out Optional, Update is superfluous")
370 }
371 }
372
373 tsm = schemaMap(r.Schema)
374
375		// Delete and Read are required
376 if r.Read == nil {
377 return fmt.Errorf("Read must be implemented")
378 }
379 if r.Delete == nil {
380 return fmt.Errorf("Delete must be implemented")
381 }
382
383 // If we have an importer, we need to verify the importer.
384 if r.Importer != nil {
385 if err := r.Importer.InternalValidate(); err != nil {
386 return err
387 }
388 }
389 }
390
391 return schemaMap(r.Schema).InternalValidate(tsm)
392}
393
394// Data returns a ResourceData struct for this Resource. Each return value
395// is a separate copy and can be safely modified differently.
396//
397// The data returned from this function has no actual effect on the Resource
398// itself (including the state given to this function).
399//
400// This function is useful for unit tests and ResourceImporter functions.
401func (r *Resource) Data(s *terraform.InstanceState) *ResourceData {
402 result, err := schemaMap(r.Schema).Data(s, nil)
403 if err != nil {
404 // At the time of writing, this isn't possible (Data never returns
405 // non-nil errors). We panic to find this in the future if we have to.
406 // I don't see a reason for Data to ever return an error.
407 panic(err)
408 }
409
410 // Set the schema version to latest by default
411 result.meta = map[string]interface{}{
412 "schema_version": strconv.Itoa(r.SchemaVersion),
413 }
414
415 return result
416}
417
418// TestResourceData yields a ResourceData filled with this resource's schema for use in unit testing.
419//
420// TODO: May be removable in favor of the Data function above.
421func (r *Resource) TestResourceData() *ResourceData {
422 return &ResourceData{
423 schema: r.Schema,
424 }
425}
426
427// isTopLevel returns true if the resource is "top level", i.e. not a sub-resource.
428func (r *Resource) isTopLevel() bool {
429 // TODO: This is a heuristic; replace with a definitive attribute?
430 return r.Create != nil
431}
432
433// checkSchemaVersion determines whether a given InstanceState needs to be
434// migrated by comparing the stored version number against the current SchemaVersion.
435func (r *Resource) checkSchemaVersion(is *terraform.InstanceState) (bool, int) {
436 // Get the raw interface{} value for the schema version. If it doesn't
437 // exist or is nil then set it to zero.
438 raw := is.Meta["schema_version"]
439 if raw == nil {
440 raw = "0"
441 }
442
443 // Try to convert it to a string. If it isn't a string then we pretend
444 // that it isn't set at all. It should never not be a string unless it
445 // was manually tampered with.
446 rawString, ok := raw.(string)
447 if !ok {
448 rawString = "0"
449 }
450
451 stateSchemaVersion, _ := strconv.Atoi(rawString)
452 return stateSchemaVersion < r.SchemaVersion, stateSchemaVersion
453}
454
455func (r *Resource) recordCurrentSchemaVersion(
456 state *terraform.InstanceState) *terraform.InstanceState {
457 if state != nil && r.SchemaVersion > 0 {
458 if state.Meta == nil {
459 state.Meta = make(map[string]interface{})
460 }
461 state.Meta["schema_version"] = strconv.Itoa(r.SchemaVersion)
462 }
463 return state
464}
465
466// Noop is a convenience implementation of a resource function which takes
467// no action and returns no error.
468func Noop(*ResourceData, interface{}) error {
469 return nil
470}
471
472// RemoveFromState is a convenience implementation of a resource function
473// which sets the resource ID to empty string (to remove it from state)
474// and returns no error.
475func RemoveFromState(d *ResourceData, _ interface{}) error {
476 d.SetId("")
477 return nil
478}
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/resource_data.go b/vendor/github.com/hashicorp/terraform/helper/schema/resource_data.go
new file mode 100644
index 0000000..b2bc8f6
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/resource_data.go
@@ -0,0 +1,502 @@
1package schema
2
3import (
4 "log"
5 "reflect"
6 "strings"
7 "sync"
8 "time"
9
10 "github.com/hashicorp/terraform/terraform"
11)
12
13// ResourceData is used to query and set the attributes of a resource.
14//
15// ResourceData is the primary argument received for CRUD operations on
16// a resource as well as configuration of a provider. It is a powerful
17// structure that can be used to not only query data, but check for changes,
18// define partial state updates, etc.
19//
20// The most relevant methods to take a look at are Get, Set, and Partial.
21type ResourceData struct {
22 // Settable (internally)
23 schema map[string]*Schema
24 config *terraform.ResourceConfig
25 state *terraform.InstanceState
26 diff *terraform.InstanceDiff
27 meta map[string]interface{}
28 timeouts *ResourceTimeout
29
30 // Don't set
31 multiReader *MultiLevelFieldReader
32 setWriter *MapFieldWriter
33 newState *terraform.InstanceState
34 partial bool
35 partialMap map[string]struct{}
36 once sync.Once
37 isNew bool
38}
39
40// getResult is the internal structure that is generated when a Get
41// is called that contains some extra data that might be used.
42type getResult struct {
43 Value interface{}
44 ValueProcessed interface{}
45 Computed bool
46 Exists bool
47 Schema *Schema
48}
49
50// UnsafeSetFieldRaw allows setting arbitrary values in state to arbitrary
51// values, bypassing schema. This MUST NOT be used in normal circumstances -
52// it exists only to support the remote_state data source.
53func (d *ResourceData) UnsafeSetFieldRaw(key string, value string) {
54 d.once.Do(d.init)
55
56 d.setWriter.unsafeWriteField(key, value)
57}
58
59// Get returns the data for the given key, or nil if the key doesn't exist
60// in the schema.
61//
62// If the key does exist in the schema but doesn't exist in the configuration,
63// then the default value for that type will be returned. For strings, this is
64// "", for numbers it is 0, etc.
65//
66// If you want to test if something is set at all in the configuration,
67// use GetOk.
68func (d *ResourceData) Get(key string) interface{} {
69 v, _ := d.GetOk(key)
70 return v
71}
72
73// GetChange returns the old and new value for a given key.
74//
75// HasChange should be used to check if a change exists. It is possible
76// that both the old and new value are the same if the old value was not
77// set and the new value is. This is common, for example, for boolean
78// fields which have a zero value of false.
79func (d *ResourceData) GetChange(key string) (interface{}, interface{}) {
80 o, n := d.getChange(key, getSourceState, getSourceDiff)
81 return o.Value, n.Value
82}
83
84// GetOk returns the data for the given key and whether or not the key
85// has been set to a non-zero value at some point.
86//
87// The first result will not necessarily be nil if the value doesn't exist.
88// The second result should be checked to determine this information.
89func (d *ResourceData) GetOk(key string) (interface{}, bool) {
90 r := d.getRaw(key, getSourceSet)
91 exists := r.Exists && !r.Computed
92 if exists {
93 // If it exists, we also want to verify it is not the zero-value.
94 value := r.Value
95 zero := r.Schema.Type.Zero()
96
97 if eq, ok := value.(Equal); ok {
98 exists = !eq.Equal(zero)
99 } else {
100 exists = !reflect.DeepEqual(value, zero)
101 }
102 }
103
104 return r.Value, exists
105}
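
// A short usage sketch for Get and GetOk, assuming a hypothetical optional
// "description" string attribute in the schema.
func exampleGetOk(d *ResourceData) {
	// Get returns the type's zero value ("" for strings) when unset.
	desc := d.Get("description").(string)
	_ = desc

	// GetOk additionally reports whether a non-zero value was set.
	if v, ok := d.GetOk("description"); ok {
		_ = v.(string) // only reached when the user set a non-empty value
	}
}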
106
107func (d *ResourceData) getRaw(key string, level getSource) getResult {
108 var parts []string
109 if key != "" {
110 parts = strings.Split(key, ".")
111 }
112
113 return d.get(parts, level)
114}
115
116// HasChange returns whether or not the given key has been changed.
117func (d *ResourceData) HasChange(key string) bool {
118 o, n := d.GetChange(key)
119
120 // If the type implements the Equal interface, then call that
121 // instead of just doing a reflect.DeepEqual. An example where this is
122 // needed is *Set
123 if eq, ok := o.(Equal); ok {
124 return !eq.Equal(n)
125 }
126
127 return !reflect.DeepEqual(o, n)
128}
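
// A sketch of the usual HasChange/GetChange pairing inside an update
// function; the "tags" attribute (a TypeMap) is hypothetical.
func exampleTagUpdate(d *ResourceData, meta interface{}) error {
	if d.HasChange("tags") {
		oldRaw, newRaw := d.GetChange("tags")
		oldTags := oldRaw.(map[string]interface{})
		newTags := newRaw.(map[string]interface{})
		// diff oldTags against newTags and call the remote API here
		_, _ = oldTags, newTags
	}
	return nil
}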
129
130// Partial turns partial state mode on/off.
131//
132// When partial state mode is enabled, then only key prefixes specified
133// by SetPartial will be in the final state. This allows providers to return
134// partial states for partially applied resources (when errors occur).
135func (d *ResourceData) Partial(on bool) {
136 d.partial = on
137 if on {
138 if d.partialMap == nil {
139 d.partialMap = make(map[string]struct{})
140 }
141 } else {
142 d.partialMap = nil
143 }
144}
145
146// Set sets the value for the given key.
147//
148// If the key is invalid or the value is not a correct type, an error
149// will be returned.
150func (d *ResourceData) Set(key string, value interface{}) error {
151 d.once.Do(d.init)
152
153 // If the value is a pointer to a non-struct, get its value and
154 // use that. This allows Set to take a pointer to primitives to
155 // simplify the interface.
156 reflectVal := reflect.ValueOf(value)
157 if reflectVal.Kind() == reflect.Ptr {
158 if reflectVal.IsNil() {
159 // If the pointer is nil, then the value is just nil
160 value = nil
161 } else {
162 // Otherwise, we dereference the pointer as long as its not
163 // a pointer to a struct, since struct pointers are allowed.
164 reflectVal = reflect.Indirect(reflectVal)
165 if reflectVal.Kind() != reflect.Struct {
166 value = reflectVal.Interface()
167 }
168 }
169 }
170
171 return d.setWriter.WriteField(strings.Split(key, "."), value)
172}
173
174// SetPartial adds the key to the final state output while
175// in partial state mode. The key must be a root key in the schema (i.e.
176// it cannot be "list.0").
177//
178// If partial state mode is disabled, then this has no effect. Additionally,
179// whenever partial state mode is toggled, the partial data is cleared.
180func (d *ResourceData) SetPartial(k string) {
181 if d.partial {
182 d.partialMap[k] = struct{}{}
183 }
184}
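
// A sketch of partial state mode in an update function. apiRename stands in
// for a hypothetical remote call; only keys marked via SetPartial survive an
// early error return.
func examplePartialUpdate(d *ResourceData, meta interface{}) error {
	apiRename := func(string) error { return nil } // hypothetical stub

	d.Partial(true)

	if d.HasChange("name") {
		if err := apiRename(d.Get("name").(string)); err != nil {
			// Returning here persists only the keys marked so far.
			return err
		}
		d.SetPartial("name")
	}

	// Success: disable partial mode so the full state is persisted.
	d.Partial(false)
	return nil
}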
185
186func (d *ResourceData) MarkNewResource() {
187 d.isNew = true
188}
189
190func (d *ResourceData) IsNewResource() bool {
191 return d.isNew
192}
193
194// Id returns the ID of the resource.
195func (d *ResourceData) Id() string {
196 var result string
197
198 if d.state != nil {
199 result = d.state.ID
200 }
201
202 if d.newState != nil {
203 result = d.newState.ID
204 }
205
206 return result
207}
208
209// ConnInfo returns the connection info for this resource.
210func (d *ResourceData) ConnInfo() map[string]string {
211 if d.newState != nil {
212 return d.newState.Ephemeral.ConnInfo
213 }
214
215 if d.state != nil {
216 return d.state.Ephemeral.ConnInfo
217 }
218
219 return nil
220}
221
222// SetId sets the ID of the resource. If the value is blank, then the
223// resource is destroyed.
224func (d *ResourceData) SetId(v string) {
225 d.once.Do(d.init)
226 d.newState.ID = v
227}
228
229// SetConnInfo sets the connection info for a resource.
230func (d *ResourceData) SetConnInfo(v map[string]string) {
231 d.once.Do(d.init)
232 d.newState.Ephemeral.ConnInfo = v
233}
234
235// SetType sets the ephemeral type for the data. This is only required
236// for importing.
237func (d *ResourceData) SetType(t string) {
238 d.once.Do(d.init)
239 d.newState.Ephemeral.Type = t
240}
241
242// State returns the new InstanceState after the diff and any Set
243// calls.
244func (d *ResourceData) State() *terraform.InstanceState {
245 var result terraform.InstanceState
246 result.ID = d.Id()
247 result.Meta = d.meta
248
249 // If we have no ID, then this resource doesn't exist and we just
250 // return nil.
251 if result.ID == "" {
252 return nil
253 }
254
255 if d.timeouts != nil {
256 if err := d.timeouts.StateEncode(&result); err != nil {
257 log.Printf("[ERR] Error encoding Timeout meta to Instance State: %s", err)
258 }
259 }
260
261	// Look for a magic key in the schema that tells us to skip the
262	// integrity check of fields existing in the schema, allowing dynamic
263	// keys to be created.
264 hasDynamicAttributes := false
265	for k := range d.schema {
266 if k == "__has_dynamic_attributes" {
267 hasDynamicAttributes = true
268 log.Printf("[INFO] Resource %s has dynamic attributes", result.ID)
269 }
270 }
271
272 // In order to build the final state attributes, we read the full
273 // attribute set as a map[string]interface{}, write it to a MapFieldWriter,
274 // and then use that map.
275 rawMap := make(map[string]interface{})
276 for k := range d.schema {
277 source := getSourceSet
278 if d.partial {
279 source = getSourceState
280 if _, ok := d.partialMap[k]; ok {
281 source = getSourceSet
282 }
283 }
284
285 raw := d.get([]string{k}, source)
286 if raw.Exists && !raw.Computed {
287 rawMap[k] = raw.Value
288 if raw.ValueProcessed != nil {
289 rawMap[k] = raw.ValueProcessed
290 }
291 }
292 }
293
294 mapW := &MapFieldWriter{Schema: d.schema}
295 if err := mapW.WriteField(nil, rawMap); err != nil {
296 return nil
297 }
298
299 result.Attributes = mapW.Map()
300
301 if hasDynamicAttributes {
302 // If we have dynamic attributes, just copy the attributes map
303 // one for one into the result attributes.
304 for k, v := range d.setWriter.Map() {
305 // Don't clobber schema values. This limits usage of dynamic
306 // attributes to names which _do not_ conflict with schema
307 // keys!
308 if _, ok := result.Attributes[k]; !ok {
309 result.Attributes[k] = v
310 }
311 }
312 }
313
314 if d.newState != nil {
315 result.Ephemeral = d.newState.Ephemeral
316 }
317
318 // TODO: This is hacky and we can remove this when we have a proper
319 // state writer. We should instead have a proper StateFieldWriter
320 // and use that.
321 for k, schema := range d.schema {
322 if schema.Type != TypeMap {
323 continue
324 }
325
326 if result.Attributes[k] == "" {
327 delete(result.Attributes, k)
328 }
329 }
330
331 if v := d.Id(); v != "" {
332 result.Attributes["id"] = d.Id()
333 }
334
335 if d.state != nil {
336 result.Tainted = d.state.Tainted
337 }
338
339 return &result
340}
341
342// Timeout returns the duration configured for the given timeout key.
343// It falls back to Default, then to a system default of 20 minutes, for any key not set.
344func (d *ResourceData) Timeout(key string) time.Duration {
345 key = strings.ToLower(key)
346
347 var timeout *time.Duration
348 switch key {
349 case TimeoutCreate:
350 timeout = d.timeouts.Create
351 case TimeoutRead:
352 timeout = d.timeouts.Read
353 case TimeoutUpdate:
354 timeout = d.timeouts.Update
355 case TimeoutDelete:
356 timeout = d.timeouts.Delete
357 }
358
359 if timeout != nil {
360 return *timeout
361 }
362
363 if d.timeouts.Default != nil {
364 return *d.timeouts.Default
365 }
366
367 // Return system default of 20 minutes
368 return 20 * time.Minute
369}
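
// A sketch of consuming the configured create timeout in a create function.
// The polling body is hypothetical and elided; fmt is assumed to be imported.
func exampleCreateWithTimeout(d *ResourceData, meta interface{}) error {
	deadline := time.Now().Add(d.Timeout(TimeoutCreate))
	for time.Now().Before(deadline) {
		// poll the remote API; return nil once the resource is ready
		time.Sleep(10 * time.Second)
	}
	return fmt.Errorf("timed out waiting for resource creation")
}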
370
371func (d *ResourceData) init() {
372 // Initialize the field that will store our new state
373 var copyState terraform.InstanceState
374 if d.state != nil {
375 copyState = *d.state.DeepCopy()
376 }
377 d.newState = &copyState
378
379 // Initialize the map for storing set data
380 d.setWriter = &MapFieldWriter{Schema: d.schema}
381
382 // Initialize the reader for getting data from the
383 // underlying sources (config, diff, etc.)
384 readers := make(map[string]FieldReader)
385 var stateAttributes map[string]string
386 if d.state != nil {
387 stateAttributes = d.state.Attributes
388 readers["state"] = &MapFieldReader{
389 Schema: d.schema,
390 Map: BasicMapReader(stateAttributes),
391 }
392 }
393 if d.config != nil {
394 readers["config"] = &ConfigFieldReader{
395 Schema: d.schema,
396 Config: d.config,
397 }
398 }
399 if d.diff != nil {
400 readers["diff"] = &DiffFieldReader{
401 Schema: d.schema,
402 Diff: d.diff,
403 Source: &MultiLevelFieldReader{
404 Levels: []string{"state", "config"},
405 Readers: readers,
406 },
407 }
408 }
409 readers["set"] = &MapFieldReader{
410 Schema: d.schema,
411 Map: BasicMapReader(d.setWriter.Map()),
412 }
413 d.multiReader = &MultiLevelFieldReader{
414 Levels: []string{
415 "state",
416 "config",
417 "diff",
418 "set",
419 },
420
421 Readers: readers,
422 }
423}
424
425func (d *ResourceData) diffChange(
426 k string) (interface{}, interface{}, bool, bool) {
427 // Get the change between the state and the config.
428 o, n := d.getChange(k, getSourceState, getSourceConfig|getSourceExact)
429 if !o.Exists {
430 o.Value = nil
431 }
432 if !n.Exists {
433 n.Value = nil
434 }
435
436 // Return the old, new, and whether there is a change
437 return o.Value, n.Value, !reflect.DeepEqual(o.Value, n.Value), n.Computed
438}
439
440func (d *ResourceData) getChange(
441 k string,
442 oldLevel getSource,
443 newLevel getSource) (getResult, getResult) {
444 var parts, parts2 []string
445 if k != "" {
446 parts = strings.Split(k, ".")
447 parts2 = strings.Split(k, ".")
448 }
449
450 o := d.get(parts, oldLevel)
451 n := d.get(parts2, newLevel)
452 return o, n
453}
454
455func (d *ResourceData) get(addr []string, source getSource) getResult {
456 d.once.Do(d.init)
457
458 level := "set"
459 flags := source & ^getSourceLevelMask
460 exact := flags&getSourceExact != 0
461 source = source & getSourceLevelMask
462 if source >= getSourceSet {
463 level = "set"
464 } else if source >= getSourceDiff {
465 level = "diff"
466 } else if source >= getSourceConfig {
467 level = "config"
468 } else {
469 level = "state"
470 }
471
472 var result FieldReadResult
473 var err error
474 if exact {
475 result, err = d.multiReader.ReadFieldExact(addr, level)
476 } else {
477 result, err = d.multiReader.ReadFieldMerge(addr, level)
478 }
479 if err != nil {
480 panic(err)
481 }
482
483 // If the result doesn't exist, then we set the value to the zero value
484 var schema *Schema
485 if schemaL := addrToSchema(addr, d.schema); len(schemaL) > 0 {
486 schema = schemaL[len(schemaL)-1]
487 }
488
489 if result.Value == nil && schema != nil {
490 result.Value = result.ValueOrZero(schema)
491 }
492
493 // Transform the FieldReadResult into a getResult. It might be worth
494 // merging these two structures one day.
495 return getResult{
496 Value: result.Value,
497 ValueProcessed: result.ValueProcessed,
498 Computed: result.Computed,
499 Exists: result.Exists,
500 Schema: schema,
501 }
502}
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/resource_data_get_source.go b/vendor/github.com/hashicorp/terraform/helper/schema/resource_data_get_source.go
new file mode 100644
index 0000000..7dd655d
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/resource_data_get_source.go
@@ -0,0 +1,17 @@
1package schema
2
3//go:generate stringer -type=getSource resource_data_get_source.go
4
5// getSource represents the level we want to get for a value (internally).
6// Any source less than or equal to the level will be loaded (whichever
7// has a value first).
8type getSource byte
9
10const (
11 getSourceState getSource = 1 << iota
12 getSourceConfig
13 getSourceDiff
14 getSourceSet
15 getSourceExact // Only get from the _exact_ level
16 getSourceLevelMask getSource = getSourceState | getSourceConfig | getSourceDiff | getSourceSet
17)
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/resource_importer.go b/vendor/github.com/hashicorp/terraform/helper/schema/resource_importer.go
new file mode 100644
index 0000000..5dada3c
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/resource_importer.go
@@ -0,0 +1,52 @@
1package schema
2
3// ResourceImporter defines how a resource is imported in Terraform. This
4// can be set onto a Resource struct to make it Importable. Not all resources
5// have to be importable; if a Resource doesn't have a ResourceImporter then
6// it won't be importable.
7//
8// "Importing" in Terraform is the process of taking an already-created
9// resource and bringing it under Terraform management. This can include
10// updating Terraform state, generating Terraform configuration, etc.
11type ResourceImporter struct {
12 // The functions below must all be implemented for importing to work.
13
14	// State is called to convert an ID to one or more InstanceState values
15	// to insert into the Terraform state. If this isn't specified, then
16 // the ID is passed straight through.
17 State StateFunc
18}
19
20// StateFunc is the function called to import a resource into the
21// Terraform state. It is given a ResourceData with only ID set. This
22// ID is going to be an arbitrary value given by the user and may not map
23// directly to the ID format that the resource expects, so that should
24// be validated.
25//
26// This should return a slice of ResourceData that turn into the state
27// that was imported. This might be as simple as returning only the argument
28// that was given to the function. In other cases (such as AWS security groups),
29// an import may fan out to multiple resources and this will have to return
30// multiple.
31//
32// To create the ResourceData structures for other resource types (if
33// you have to), instantiate your resource and call the Data function.
34type StateFunc func(*ResourceData, interface{}) ([]*ResourceData, error)
35
36// InternalValidate should be called to validate the structure of this
37// importer. This should be called in a unit test.
38//
39// Resource.InternalValidate() will automatically call this, so this doesn't
40// need to be called manually. Further, Resource.InternalValidate() is
41// automatically called by Provider.InternalValidate(), so you only need
42// to internal validate the provider.
43func (r *ResourceImporter) InternalValidate() error {
44 return nil
45}
46
47// ImportStatePassthrough is an implementation of StateFunc that can be
48// used to simply pass the ID directly through. This should be used only
49// in the case that an ID-only refresh is possible.
50func ImportStatePassthrough(d *ResourceData, m interface{}) ([]*ResourceData, error) {
51 return []*ResourceData{d}, nil
52}
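
// A sketch of making an existing resource importable via the passthrough
// importer above; this assumes the resource's Read can refresh from the
// user-supplied ID alone.
func withPassthroughImport(r *Resource) *Resource {
	r.Importer = &ResourceImporter{State: ImportStatePassthrough}
	return r
}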
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/resource_timeout.go b/vendor/github.com/hashicorp/terraform/helper/schema/resource_timeout.go
new file mode 100644
index 0000000..445819f
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/resource_timeout.go
@@ -0,0 +1,237 @@
1package schema
2
3import (
4 "fmt"
5 "log"
6 "time"
7
8 "github.com/hashicorp/terraform/terraform"
9 "github.com/mitchellh/copystructure"
10)
11
12const TimeoutKey = "e2bfb730-ecaa-11e6-8f88-34363bc7c4c0"
13const TimeoutsConfigKey = "timeouts"
14
15const (
16 TimeoutCreate = "create"
17 TimeoutRead = "read"
18 TimeoutUpdate = "update"
19 TimeoutDelete = "delete"
20 TimeoutDefault = "default"
21)
22
23func timeoutKeys() []string {
24 return []string{
25 TimeoutCreate,
26 TimeoutRead,
27 TimeoutUpdate,
28 TimeoutDelete,
29 TimeoutDefault,
30 }
31}
32
33// DefaultTimeout converts tx, which may be a time.Duration, an int64, or a
34// float64, into a *time.Duration.
34func DefaultTimeout(tx interface{}) *time.Duration {
35 var td time.Duration
36 switch raw := tx.(type) {
37 case time.Duration:
38 return &raw
39 case int64:
40 td = time.Duration(raw)
41 case float64:
42 td = time.Duration(int64(raw))
43 default:
44 log.Printf("[WARN] Unknown type in DefaultTimeout: %#v", tx)
45 }
46 return &td
47}
48
49type ResourceTimeout struct {
50 Create, Read, Update, Delete, Default *time.Duration
51}
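
// A sketch of declaring resource timeouts with DefaultTimeout (defined
// above); the durations are arbitrary examples.
func exampleTimeouts() *ResourceTimeout {
	return &ResourceTimeout{
		Create:  DefaultTimeout(10 * time.Minute),
		Delete:  DefaultTimeout(30 * time.Minute),
		Default: DefaultTimeout(5 * time.Minute), // fills any action left nil
	}
}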
52
53// ConfigDecode takes a resource and its configuration, validates the timeout
54// values against the resource's declared Timeouts, and parses them into t.
55func (t *ResourceTimeout) ConfigDecode(s *Resource, c *terraform.ResourceConfig) error {
56 if s.Timeouts != nil {
57 raw, err := copystructure.Copy(s.Timeouts)
58 if err != nil {
59 log.Printf("[DEBUG] Error with deep copy: %s", err)
60 }
61 *t = *raw.(*ResourceTimeout)
62 }
63
64 if raw, ok := c.Config[TimeoutsConfigKey]; ok {
65 if configTimeouts, ok := raw.([]map[string]interface{}); ok {
66 for _, timeoutValues := range configTimeouts {
67				// loop through each Timeout given in the configuration and
68				// validate it against the Timeouts defined in the resource
69 for timeKey, timeValue := range timeoutValues {
70 // validate that we're dealing with the normal CRUD actions
71 var found bool
72 for _, key := range timeoutKeys() {
73 if timeKey == key {
74 found = true
75 break
76 }
77 }
78
79 if !found {
80 return fmt.Errorf("Unsupported Timeout configuration key found (%s)", timeKey)
81 }
82
83 // Get timeout
84 rt, err := time.ParseDuration(timeValue.(string))
85 if err != nil {
86 return fmt.Errorf("Error parsing Timeout for (%s): %s", timeKey, err)
87 }
88
89 var timeout *time.Duration
90 switch timeKey {
91 case TimeoutCreate:
92 timeout = t.Create
93 case TimeoutUpdate:
94 timeout = t.Update
95 case TimeoutRead:
96 timeout = t.Read
97 case TimeoutDelete:
98 timeout = t.Delete
99 case TimeoutDefault:
100 timeout = t.Default
101 }
102
103					// If the resource has not declared this timeout in its
104					// definition, then error with an unsupported message
105 if timeout == nil {
106 return unsupportedTimeoutKeyError(timeKey)
107 }
108
109 *timeout = rt
110 }
111 }
112 } else {
113 log.Printf("[WARN] Invalid Timeout structure found, skipping timeouts")
114 }
115 }
116
117 return nil
118}
119
120func unsupportedTimeoutKeyError(key string) error {
121 return fmt.Errorf("Timeout Key (%s) is not supported", key)
122}
123
124// DiffEncode, StateEncode, and their metaDecode counterparts are analogous
125// to the encoder/decoder pattern of the Go stdlib encoding/json package:
126// they encode/decode a timeouts struct to and from an instance diff, which
127// is where the timeout data is stored after a diff so it can be passed
128// into Apply.
129//
130// StateEncode encodes the timeouts into the InstanceState for saving to state.
131func (t *ResourceTimeout) DiffEncode(id *terraform.InstanceDiff) error {
132 return t.metaEncode(id)
133}
134
135func (t *ResourceTimeout) StateEncode(is *terraform.InstanceState) error {
136 return t.metaEncode(is)
137}
138
139// metaEncode encodes the ResourceTimeout into a map[string]interface{} format
140// and stores it in the Meta field of the interface it's given.
141// Assumes the interface is either *terraform.InstanceState or
142// *terraform.InstanceDiff, returns an error otherwise
143func (t *ResourceTimeout) metaEncode(ids interface{}) error {
144 m := make(map[string]interface{})
145
146 if t.Create != nil {
147 m[TimeoutCreate] = t.Create.Nanoseconds()
148 }
149 if t.Read != nil {
150 m[TimeoutRead] = t.Read.Nanoseconds()
151 }
152 if t.Update != nil {
153 m[TimeoutUpdate] = t.Update.Nanoseconds()
154 }
155 if t.Delete != nil {
156 m[TimeoutDelete] = t.Delete.Nanoseconds()
157 }
158 if t.Default != nil {
159 m[TimeoutDefault] = t.Default.Nanoseconds()
160 // for any key above that is nil, if default is specified, we need to
161 // populate it with the default
162 for _, k := range timeoutKeys() {
163 if _, ok := m[k]; !ok {
164 m[k] = t.Default.Nanoseconds()
165 }
166 }
167 }
168
169 // only add the Timeout to the Meta if we have values
170 if len(m) > 0 {
171 switch instance := ids.(type) {
172 case *terraform.InstanceDiff:
173 if instance.Meta == nil {
174 instance.Meta = make(map[string]interface{})
175 }
176 instance.Meta[TimeoutKey] = m
177 case *terraform.InstanceState:
178 if instance.Meta == nil {
179 instance.Meta = make(map[string]interface{})
180 }
181 instance.Meta[TimeoutKey] = m
182 default:
183 return fmt.Errorf("Error matching type for Diff Encode")
184 }
185 }
186
187 return nil
188}
189
190func (t *ResourceTimeout) StateDecode(id *terraform.InstanceState) error {
191 return t.metaDecode(id)
192}
193func (t *ResourceTimeout) DiffDecode(is *terraform.InstanceDiff) error {
194 return t.metaDecode(is)
195}
196
197func (t *ResourceTimeout) metaDecode(ids interface{}) error {
198 var rawMeta interface{}
199 var ok bool
200 switch rawInstance := ids.(type) {
201 case *terraform.InstanceDiff:
202 rawMeta, ok = rawInstance.Meta[TimeoutKey]
203 if !ok {
204 return nil
205 }
206 case *terraform.InstanceState:
207 rawMeta, ok = rawInstance.Meta[TimeoutKey]
208 if !ok {
209 return nil
210 }
211 default:
212 return fmt.Errorf("Unknown or unsupported type in metaDecode: %#v", ids)
213 }
214
215 times := rawMeta.(map[string]interface{})
216 if len(times) == 0 {
217 return nil
218 }
219
220 if v, ok := times[TimeoutCreate]; ok {
221 t.Create = DefaultTimeout(v)
222 }
223 if v, ok := times[TimeoutRead]; ok {
224 t.Read = DefaultTimeout(v)
225 }
226 if v, ok := times[TimeoutUpdate]; ok {
227 t.Update = DefaultTimeout(v)
228 }
229 if v, ok := times[TimeoutDelete]; ok {
230 t.Delete = DefaultTimeout(v)
231 }
232 if v, ok := times[TimeoutDefault]; ok {
233 t.Default = DefaultTimeout(v)
234 }
235
236 return nil
237}
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/schema.go b/vendor/github.com/hashicorp/terraform/helper/schema/schema.go
new file mode 100644
index 0000000..32d1721
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/schema.go
@@ -0,0 +1,1537 @@
1// schema is a high-level framework for easily writing new providers
2// for Terraform. Usage of schema is recommended over attempting to write
3// to the low-level plugin interfaces manually.
4//
5// schema breaks down provider creation into simple CRUD operations for
6// resources. The logic of diffing, destroying before creating, updating
7// or creating, etc. is all handled by the framework. The plugin author
8// only needs to implement a configuration schema and the CRUD operations and
9// everything else is meant to just work.
10//
11// A good starting point is to view the Provider structure.
12package schema
13
14import (
15 "fmt"
16 "os"
17 "reflect"
18 "sort"
19 "strconv"
20 "strings"
21
22 "github.com/hashicorp/terraform/terraform"
23 "github.com/mitchellh/mapstructure"
24)
25
26// type used for schema package context keys
27type contextKey string
28
29// Schema is used to describe the structure of a value.
30//
31// Read the documentation of the struct elements for important details.
32type Schema struct {
33 // Type is the type of the value and must be one of the ValueType values.
34 //
35 // This type not only determines what type is expected/valid in configuring
36 // this value, but also what type is returned when ResourceData.Get is
37 // called. The types returned by Get are:
38 //
39 // TypeBool - bool
40 // TypeInt - int
41 // TypeFloat - float64
42 // TypeString - string
43 // TypeList - []interface{}
44 // TypeMap - map[string]interface{}
45 // TypeSet - *schema.Set
46 //
47 Type ValueType
48
49 // If one of these is set, then this item can come from the configuration.
50 // Both cannot be set. If Optional is set, the value is optional. If
51 // Required is set, the value is required.
52 //
53 // One of these must be set if the value is not computed. That is:
54 // value either comes from the config, is computed, or is both.
55 Optional bool
56 Required bool
57
58 // If this is non-nil, the provided function will be used during diff
59 // of this field. If this is nil, a default diff for the type of the
60 // schema will be used.
61 //
62 // This allows comparison based on something other than primitive, list
63 // or map equality - for example SSH public keys may be considered
64 // equivalent regardless of trailing whitespace.
65 DiffSuppressFunc SchemaDiffSuppressFunc
66
67 // If this is non-nil, then this will be a default value that is used
68 // when this item is not set in the configuration.
69 //
70 // DefaultFunc can be specified to compute a dynamic default.
71 // Only one of Default or DefaultFunc can be set. If DefaultFunc is
72 // used then its return value should be stable to avoid generating
73 // confusing/perpetual diffs.
74 //
75 // Changing either Default or the return value of DefaultFunc can be
76 // a breaking change, especially if the attribute in question has
77 // ForceNew set. If a default needs to change to align with changing
78 // assumptions in an upstream API then it may be necessary to also use
79 // the MigrateState function on the resource to change the state to match,
80 // or have the Read function adjust the state value to align with the
81 // new default.
82 //
83 // If Required is true above, then Default cannot be set. DefaultFunc
84 // can be set with Required. If the DefaultFunc returns nil, then there
85 // will be no default and the user will be asked to fill it in.
86 //
87 // If either of these is set, then the user won't be asked for input
88 // for this key if the default is not nil.
89 Default interface{}
90 DefaultFunc SchemaDefaultFunc
91
92 // Description is used as the description for docs or asking for user
93 // input. It should be relatively short (a few sentences max) and should
94 // be formatted to fit a CLI.
95 Description string
96
97	// InputDefault is the default value to use when input is requested.
98	// This differs from Default in that if Default is set, no input is
99	// asked for. If input is requested, this will be the default value offered.
100 InputDefault string
101
102 // The fields below relate to diffs.
103 //
104 // If Computed is true, then the result of this value is computed
105 // (unless specified by config) on creation.
106 //
107 // If ForceNew is true, then a change in this resource necessitates
108 // the creation of a new resource.
109 //
110 // StateFunc is a function called to change the value of this before
111 // storing it in the state (and likewise before comparing for diffs).
112 // The use for this is for example with large strings, you may want
113 // to simply store the hash of it.
114 Computed bool
115 ForceNew bool
116 StateFunc SchemaStateFunc
117
118 // The following fields are only set for a TypeList or TypeSet Type.
119 //
120 // Elem must be either a *Schema or a *Resource only if the Type is
121 // TypeList, and represents what the element type is. If it is *Schema,
122 // the element type is just a simple value. If it is *Resource, the
123 // element type is a complex structure, potentially with its own lifecycle.
124 //
125	// MaxItems defines the maximum number of items that can exist within a
126	// TypeSet or TypeList. A typical use case is a TypeSet that is being
127	// used to wrap a complex structure, where more than one instance would
128	// cause instability.
129	//
130	// MinItems defines the minimum number of items that can exist within a
131	// TypeSet or TypeList. A typical use case is a TypeSet that is being
132	// used to wrap a complex structure, where fewer than one instance would
133	// cause instability.
134 //
135 // PromoteSingle, if true, will allow single elements to be standalone
136 // and promote them to a list. For example "foo" would be promoted to
137 // ["foo"] automatically. This is primarily for legacy reasons and the
138 // ambiguity is not recommended for new usage. Promotion is only allowed
139 // for primitive element types.
140 Elem interface{}
141 MaxItems int
142 MinItems int
143 PromoteSingle bool
144
145 // The following fields are only valid for a TypeSet type.
146 //
147 // Set defines a function to determine the unique ID of an item so that
148 // a proper set can be built.
149 Set SchemaSetFunc
150
151 // ComputedWhen is a set of queries on the configuration. Whenever any
152 // of these things is changed, it will require a recompute (this requires
153 // that Computed is set to true).
154 //
155 // NOTE: This currently does not work.
156 ComputedWhen []string
157
158 // ConflictsWith is a set of schema keys that conflict with this schema.
159 // This will only check that they're set in the _config_. This will not
160 // raise an error for a malfunctioning resource that sets a conflicting
161 // key.
162 ConflictsWith []string
163
164 // When Deprecated is set, this attribute is deprecated.
165 //
166	// A deprecated field still works, but will probably stop working in the near
167 // future. This string is the message shown to the user with instructions on
168 // how to address the deprecation.
169 Deprecated string
170
171 // When Removed is set, this attribute has been removed from the schema
172 //
173 // Removed attributes can be left in the Schema to generate informative error
174 // messages for the user when they show up in resource configurations.
175 // This string is the message shown to the user with instructions on
176	// what to do about the removed attribute.
177 Removed string
178
179 // ValidateFunc allows individual fields to define arbitrary validation
180 // logic. It is yielded the provided config value as an interface{} that is
181 // guaranteed to be of the proper Schema type, and it can yield warnings or
182 // errors based on inspection of that value.
183 //
184 // ValidateFunc currently only works for primitive types.
185 ValidateFunc SchemaValidateFunc
186
187 // Sensitive ensures that the attribute's value does not get displayed in
188 // logs or regular output. It should be used for passwords or other
189 // secret fields. Future versions of Terraform may encrypt these
190 // values.
191 Sensitive bool
192}
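
// A sketch of a schema map exercising several of the fields above; the
// attribute names and defaults are hypothetical.
func exampleSchemaMap() map[string]*Schema {
	return map[string]*Schema{
		"name": {
			Type:     TypeString,
			Required: true,
			ForceNew: true, // any change forces a replacement
		},
		"description": {
			Type:     TypeString,
			Optional: true,
			Default:  "managed by terraform",
		},
		"password": {
			Type:      TypeString,
			Optional:  true,
			Sensitive: true, // hidden from logs and regular output
		},
		"arn": {
			Type:     TypeString,
			Computed: true, // set by the provider, never by the user
		},
	}
}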
193
194// SchemaDiffSuppressFunc is a function which can be used to determine
195// whether a detected diff on a schema element is "valid" or not, and
196// suppress it from the plan if necessary.
197//
198// Return true if the diff should be suppressed, false to retain it.
199type SchemaDiffSuppressFunc func(k, old, new string, d *ResourceData) bool
200
201// SchemaDefaultFunc is a function called to return a default value for
202// a field.
203type SchemaDefaultFunc func() (interface{}, error)
204
205// EnvDefaultFunc is a helper function that returns the value of the
206// given environment variable, if one exists, or the default value
207// otherwise.
208func EnvDefaultFunc(k string, dv interface{}) SchemaDefaultFunc {
209 return func() (interface{}, error) {
210 if v := os.Getenv(k); v != "" {
211 return v, nil
212 }
213
214 return dv, nil
215 }
216}
217
218// MultiEnvDefaultFunc is a helper function that returns the value of the first
219// environment variable in the given list that returns a non-empty value. If
220// none of the environment variables return a value, the default value is
221// returned.
222func MultiEnvDefaultFunc(ks []string, dv interface{}) SchemaDefaultFunc {
223 return func() (interface{}, error) {
224 for _, k := range ks {
225 if v := os.Getenv(k); v != "" {
226 return v, nil
227 }
228 }
229 return dv, nil
230 }
231}
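
// A sketch of wiring EnvDefaultFunc into a provider schema; the attribute
// and environment variable names are hypothetical.
func exampleTokenSchema() *Schema {
	return &Schema{
		Type:        TypeString,
		Optional:    true,
		DefaultFunc: EnvDefaultFunc("EXAMPLE_API_TOKEN", nil),
		Description: "The API token. Read from EXAMPLE_API_TOKEN if unset.",
	}
}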
232
233// SchemaSetFunc is a function that must return a unique ID for the given
234// element. This unique ID is used to store the element in a hash.
235type SchemaSetFunc func(interface{}) int
236
237// SchemaStateFunc is a function used to convert some type to a string
238// to be stored in the state.
239type SchemaStateFunc func(interface{}) string
240
241// SchemaValidateFunc is a function used to validate a single field in the
242// schema.
243type SchemaValidateFunc func(interface{}, string) ([]string, []error)
244
245func (s *Schema) GoString() string {
246 return fmt.Sprintf("*%#v", *s)
247}
248
249// DefaultValue returns a default value for this schema by either reading
250// Default or evaluating DefaultFunc. If neither is defined, it returns nil.
251func (s *Schema) DefaultValue() (interface{}, error) {
252 if s.Default != nil {
253 return s.Default, nil
254 }
255
256 if s.DefaultFunc != nil {
257 defaultValue, err := s.DefaultFunc()
258 if err != nil {
259 return nil, fmt.Errorf("error loading default: %s", err)
260 }
261 return defaultValue, nil
262 }
263
264 return nil, nil
265}
266
267// ZeroValue returns a zero value for the schema.
268func (s *Schema) ZeroValue() interface{} {
269 // If it's a set then we'll do a bit of extra work to provide the
270 // right hashing function in our empty value.
271 if s.Type == TypeSet {
272 setFunc := s.Set
273 if setFunc == nil {
274 // Default set function uses the schema to hash the whole value
275 elem := s.Elem
276 switch t := elem.(type) {
277 case *Schema:
278 setFunc = HashSchema(t)
279 case *Resource:
280 setFunc = HashResource(t)
281 default:
282 panic("invalid set element type")
283 }
284 }
285 return &Set{F: setFunc}
286 } else {
287 return s.Type.Zero()
288 }
289}
290
291func (s *Schema) finalizeDiff(
292 d *terraform.ResourceAttrDiff) *terraform.ResourceAttrDiff {
293 if d == nil {
294 return d
295 }
296
297 if s.Type == TypeBool {
298 normalizeBoolString := func(s string) string {
299 switch s {
300 case "0":
301 return "false"
302 case "1":
303 return "true"
304 }
305 return s
306 }
307 d.Old = normalizeBoolString(d.Old)
308 d.New = normalizeBoolString(d.New)
309 }
310
311 if s.Computed && !d.NewRemoved && d.New == "" {
312 // Computed attribute without a new value set
313 d.NewComputed = true
314 }
315
316 if s.ForceNew {
317 // ForceNew, mark that this field is requiring new under the
318 // following conditions, explained below:
319 //
320 // * Old != New - There is a change in value. This field
321 // is therefore causing a new resource.
322 //
323 // * NewComputed - This field is being computed, hence a
324 // potential change in value, mark as causing a new resource.
325 d.RequiresNew = d.Old != d.New || d.NewComputed
326 }
327
328 if d.NewRemoved {
329 return d
330 }
331
332 if s.Computed {
333 if d.Old != "" && d.New == "" {
334 // This is a computed value with an old value set already,
335 // just let it go.
336 return nil
337 }
338
339 if d.New == "" {
340 // Computed attribute without a new value set
341 d.NewComputed = true
342 }
343 }
344
345 if s.Sensitive {
346 // Set the Sensitive flag so output is hidden in the UI
347 d.Sensitive = true
348 }
349
350 return d
351}
352
353// schemaMap is a wrapper that adds nice functions on top of schemas.
354type schemaMap map[string]*Schema
355
356// Data returns a ResourceData for the given schema, state, and diff.
357//
358// The diff is optional.
359func (m schemaMap) Data(
360 s *terraform.InstanceState,
361 d *terraform.InstanceDiff) (*ResourceData, error) {
362 return &ResourceData{
363 schema: m,
364 state: s,
365 diff: d,
366 }, nil
367}
368
369// Diff returns the diff for a resource given the schema map,
370// state, and configuration.
371func (m schemaMap) Diff(
372 s *terraform.InstanceState,
373 c *terraform.ResourceConfig) (*terraform.InstanceDiff, error) {
374 result := new(terraform.InstanceDiff)
375 result.Attributes = make(map[string]*terraform.ResourceAttrDiff)
376
377 // Make sure to mark if the resource is tainted
378 if s != nil {
379 result.DestroyTainted = s.Tainted
380 }
381
382 d := &ResourceData{
383 schema: m,
384 state: s,
385 config: c,
386 }
387
388 for k, schema := range m {
389 err := m.diff(k, schema, result, d, false)
390 if err != nil {
391 return nil, err
392 }
393 }
394
395 // If the diff requires a new resource, then we recompute the diff
396 // so we have the complete new resource diff, and preserve the
397 // RequiresNew fields where necessary so the user knows exactly what
398 // caused that.
399 if result.RequiresNew() {
400 // Create the new diff
401 result2 := new(terraform.InstanceDiff)
402 result2.Attributes = make(map[string]*terraform.ResourceAttrDiff)
403
404 // Preserve the DestroyTainted flag
405 result2.DestroyTainted = result.DestroyTainted
406
407 // Reset the data to not contain state. We have to call init()
408 // again in order to reset the FieldReaders.
409 d.state = nil
410 d.init()
411
412 // Perform the diff again
413 for k, schema := range m {
414 err := m.diff(k, schema, result2, d, false)
415 if err != nil {
416 return nil, err
417 }
418 }
419
420 // Force all the fields to not force a new since we know what we
421 // want to force new.
422 for k, attr := range result2.Attributes {
423 if attr == nil {
424 continue
425 }
426
427 if attr.RequiresNew {
428 attr.RequiresNew = false
429 }
430
431 if s != nil {
432 attr.Old = s.Attributes[k]
433 }
434 }
435
436 // Now copy in all the requires new diffs...
437 for k, attr := range result.Attributes {
438 if attr == nil {
439 continue
440 }
441
442 newAttr, ok := result2.Attributes[k]
443 if !ok {
444 newAttr = attr
445 }
446
447 if attr.RequiresNew {
448 newAttr.RequiresNew = true
449 }
450
451 result2.Attributes[k] = newAttr
452 }
453
454 // And set the diff!
455 result = result2
456 }
457
458 // Remove any nil diffs just to keep things clean
459 for k, v := range result.Attributes {
460 if v == nil {
461 delete(result.Attributes, k)
462 }
463 }
464
465 // Go through and detect all of the ComputedWhens now that we've
466 // finished the diff.
467 // TODO
468
469 if result.Empty() {
470 // If we don't have any diff elements, just return nil
471 return nil, nil
472 }
473
474 return result, nil
475}
476
477// Input implements the terraform.ResourceProvider method by asking
478// for input for required configuration keys that don't have a value.
479func (m schemaMap) Input(
480 input terraform.UIInput,
481 c *terraform.ResourceConfig) (*terraform.ResourceConfig, error) {
482 keys := make([]string, 0, len(m))
483	for k := range m {
484 keys = append(keys, k)
485 }
486 sort.Strings(keys)
487
488 for _, k := range keys {
489 v := m[k]
490
491 // Skip things that don't require config, if that is even valid
492 // for a provider schema.
493 // Required XOR Optional must always be true to validate, so we only
494 // need to check one.
495 if v.Optional {
496 continue
497 }
498
499 // Deprecated fields should never prompt
500 if v.Deprecated != "" {
501 continue
502 }
503
504 // Skip things that have a value of some sort already
505 if _, ok := c.Raw[k]; ok {
506 continue
507 }
508
509 // Skip if it has a default value
510 defaultValue, err := v.DefaultValue()
511 if err != nil {
512 return nil, fmt.Errorf("%s: error loading default: %s", k, err)
513 }
514 if defaultValue != nil {
515 continue
516 }
517
518 var value interface{}
519 switch v.Type {
520 case TypeBool, TypeInt, TypeFloat, TypeSet, TypeList:
521 continue
522 case TypeString:
523 value, err = m.inputString(input, k, v)
524 default:
525 panic(fmt.Sprintf("Unknown type for input: %#v", v.Type))
526 }
527
528 if err != nil {
529 return nil, fmt.Errorf(
530 "%s: %s", k, err)
531 }
532
533 c.Config[k] = value
534 }
535
536 return c, nil
537}
538
539// Validate validates the configuration against this schema mapping.
540func (m schemaMap) Validate(c *terraform.ResourceConfig) ([]string, []error) {
541 return m.validateObject("", m, c)
542}
543
544// InternalValidate validates the format of this schema. This should be called
545// from a unit test (and not in user-path code) to verify that a schema
546// is properly built.
547func (m schemaMap) InternalValidate(topSchemaMap schemaMap) error {
548 if topSchemaMap == nil {
549 topSchemaMap = m
550 }
551 for k, v := range m {
552 if v.Type == TypeInvalid {
553 return fmt.Errorf("%s: Type must be specified", k)
554 }
555
556 if v.Optional && v.Required {
557 return fmt.Errorf("%s: Optional or Required must be set, not both", k)
558 }
559
560 if v.Required && v.Computed {
561 return fmt.Errorf("%s: Cannot be both Required and Computed", k)
562 }
563
564 if !v.Required && !v.Optional && !v.Computed {
565			return fmt.Errorf("%s: One of Optional, Required, or Computed must be set", k)
566 }
567
568 if v.Computed && v.Default != nil {
569 return fmt.Errorf("%s: Default must be nil if computed", k)
570 }
571
572 if v.Required && v.Default != nil {
573 return fmt.Errorf("%s: Default cannot be set with Required", k)
574 }
575
576 if len(v.ComputedWhen) > 0 && !v.Computed {
577 return fmt.Errorf("%s: ComputedWhen can only be set with Computed", k)
578 }
579
580 if len(v.ConflictsWith) > 0 && v.Required {
581 return fmt.Errorf("%s: ConflictsWith cannot be set with Required", k)
582 }
583
584 if len(v.ConflictsWith) > 0 {
585 for _, key := range v.ConflictsWith {
586 parts := strings.Split(key, ".")
587 sm := topSchemaMap
588 var target *Schema
589 for _, part := range parts {
590 // Skip index fields
591 if _, err := strconv.Atoi(part); err == nil {
592 continue
593 }
594
595 var ok bool
596 if target, ok = sm[part]; !ok {
597 return fmt.Errorf("%s: ConflictsWith references unknown attribute (%s)", k, key)
598 }
599
600 if subResource, ok := target.Elem.(*Resource); ok {
601 sm = schemaMap(subResource.Schema)
602 }
603 }
604 if target == nil {
605 return fmt.Errorf("%s: ConflictsWith cannot find target attribute (%s), sm: %#v", k, key, sm)
606 }
607 if target.Required {
608 return fmt.Errorf("%s: ConflictsWith cannot contain Required attribute (%s)", k, key)
609 }
610
611 if len(target.ComputedWhen) > 0 {
612 return fmt.Errorf("%s: ConflictsWith cannot contain Computed(When) attribute (%s)", k, key)
613 }
614 }
615 }
616
617 if v.Type == TypeList || v.Type == TypeSet {
618 if v.Elem == nil {
619			return fmt.Errorf("%s: Elem must be set for lists and sets", k)
620 }
621
622 if v.Default != nil {
623 return fmt.Errorf("%s: Default is not valid for lists or sets", k)
624 }
625
626 if v.Type != TypeSet && v.Set != nil {
627 return fmt.Errorf("%s: Set can only be set for TypeSet", k)
628 }
629
630 switch t := v.Elem.(type) {
631 case *Resource:
632 if err := t.InternalValidate(topSchemaMap, true); err != nil {
633 return err
634 }
635 case *Schema:
636 bad := t.Computed || t.Optional || t.Required
637 if bad {
638 return fmt.Errorf(
639 "%s: Elem must have only Type set", k)
640 }
641 }
642 } else {
643 if v.MaxItems > 0 || v.MinItems > 0 {
644 return fmt.Errorf("%s: MaxItems and MinItems are only supported on lists or sets", k)
645 }
646 }
647
648 // Computed-only field
649 if v.Computed && !v.Optional {
650 if v.ValidateFunc != nil {
651 return fmt.Errorf("%s: ValidateFunc is for validating user input, "+
652 "there's nothing to validate on computed-only field", k)
653 }
654 if v.DiffSuppressFunc != nil {
655 return fmt.Errorf("%s: DiffSuppressFunc is for suppressing differences"+
656 " between config and state representation. "+
657 "There is no config for computed-only field, nothing to compare.", k)
658 }
659 }
660
661 if v.ValidateFunc != nil {
662 switch v.Type {
663 case TypeList, TypeSet:
664 return fmt.Errorf("ValidateFunc is not yet supported on lists or sets.")
665 }
666 }
667 }
668
669 return nil
670}
671
672func (m schemaMap) diff(
673 k string,
674 schema *Schema,
675 diff *terraform.InstanceDiff,
676 d *ResourceData,
677 all bool) error {
678
679	unsuppressedDiff := new(terraform.InstanceDiff)
680	unsuppressedDiff.Attributes = make(map[string]*terraform.ResourceAttrDiff)
681
682	var err error
683	switch schema.Type {
684	case TypeBool, TypeInt, TypeFloat, TypeString:
685		err = m.diffString(k, schema, unsuppressedDiff, d, all)
686	case TypeList:
687		err = m.diffList(k, schema, unsuppressedDiff, d, all)
688	case TypeMap:
689		err = m.diffMap(k, schema, unsuppressedDiff, d, all)
690	case TypeSet:
691		err = m.diffSet(k, schema, unsuppressedDiff, d, all)
692	default:
693		err = fmt.Errorf("%s: unknown type %#v", k, schema.Type)
694	}
695
696	for attrK, attrV := range unsuppressedDiff.Attributes {
697 if schema.DiffSuppressFunc != nil &&
698 attrV != nil &&
699 schema.DiffSuppressFunc(attrK, attrV.Old, attrV.New, d) {
700 continue
701 }
702
703 diff.Attributes[attrK] = attrV
704 }
705
706 return err
707}
708
709func (m schemaMap) diffList(
710 k string,
711 schema *Schema,
712 diff *terraform.InstanceDiff,
713 d *ResourceData,
714 all bool) error {
715 o, n, _, computedList := d.diffChange(k)
716 if computedList {
717 n = nil
718 }
719 nSet := n != nil
720
721	// If we have an old value, no new value is set and none will be
722	// computed once all variables can be interpolated, and the schema
723	// is Computed, then nothing has changed.
724 if o != nil && n == nil && !computedList && schema.Computed {
725 return nil
726 }
727
728 if o == nil {
729 o = []interface{}{}
730 }
731 if n == nil {
732 n = []interface{}{}
733 }
734 if s, ok := o.(*Set); ok {
735 o = s.List()
736 }
737 if s, ok := n.(*Set); ok {
738 n = s.List()
739 }
740 os := o.([]interface{})
741 vs := n.([]interface{})
742
743	// If the new value was set and the two are equal, then we're done.
744	// We have to do this check here because sets might not be
745	// reflect.DeepEqual-comparable until we have them as []interface{}.
746 if !all && nSet && reflect.DeepEqual(os, vs) {
747 return nil
748 }
749
750 // Get the counts
751 oldLen := len(os)
752 newLen := len(vs)
753 oldStr := strconv.FormatInt(int64(oldLen), 10)
754
755 // If the whole list is computed, then say that the # is computed
756 if computedList {
757 diff.Attributes[k+".#"] = &terraform.ResourceAttrDiff{
758 Old: oldStr,
759 NewComputed: true,
760 RequiresNew: schema.ForceNew,
761 }
762 return nil
763 }
764
765 // If the counts are not the same, then record that diff
766 changed := oldLen != newLen
767 computed := oldLen == 0 && newLen == 0 && schema.Computed
768 if changed || computed || all {
769 countSchema := &Schema{
770 Type: TypeInt,
771 Computed: schema.Computed,
772 ForceNew: schema.ForceNew,
773 }
774
775 newStr := ""
776 if !computed {
777 newStr = strconv.FormatInt(int64(newLen), 10)
778 } else {
779 oldStr = ""
780 }
781
782 diff.Attributes[k+".#"] = countSchema.finalizeDiff(&terraform.ResourceAttrDiff{
783 Old: oldStr,
784 New: newStr,
785 })
786 }
787
788 // Figure out the maximum
789 maxLen := oldLen
790 if newLen > maxLen {
791 maxLen = newLen
792 }
793
794 switch t := schema.Elem.(type) {
795 case *Resource:
796 // This is a complex resource
797 for i := 0; i < maxLen; i++ {
798 for k2, schema := range t.Schema {
799 subK := fmt.Sprintf("%s.%d.%s", k, i, k2)
800 err := m.diff(subK, schema, diff, d, all)
801 if err != nil {
802 return err
803 }
804 }
805 }
806 case *Schema:
807 // Copy the schema so that we can set Computed/ForceNew from
808 // the parent schema (the TypeList).
809 t2 := *t
810 t2.ForceNew = schema.ForceNew
811
812 // This is just a primitive element, so go through each and
813 // just diff each.
814 for i := 0; i < maxLen; i++ {
815 subK := fmt.Sprintf("%s.%d", k, i)
816 err := m.diff(subK, &t2, diff, d, all)
817 if err != nil {
818 return err
819 }
820 }
821 default:
822 return fmt.Errorf("%s: unknown element type (internal)", k)
823 }
824
825 return nil
826}
827
828func (m schemaMap) diffMap(
829 k string,
830 schema *Schema,
831 diff *terraform.InstanceDiff,
832 d *ResourceData,
833 all bool) error {
834 prefix := k + "."
835
836 // First get all the values from the state
837 var stateMap, configMap map[string]string
838 o, n, _, nComputed := d.diffChange(k)
839 if err := mapstructure.WeakDecode(o, &stateMap); err != nil {
840 return fmt.Errorf("%s: %s", k, err)
841 }
842 if err := mapstructure.WeakDecode(n, &configMap); err != nil {
843 return fmt.Errorf("%s: %s", k, err)
844 }
845
846 // Keep track of whether the state _exists_ at all prior to clearing it
847 stateExists := o != nil
848
849 // Delete any count values, since we don't use those
850 delete(configMap, "%")
851 delete(stateMap, "%")
852
853 // Check if the number of elements has changed.
854 oldLen, newLen := len(stateMap), len(configMap)
855 changed := oldLen != newLen
856 if oldLen != 0 && newLen == 0 && schema.Computed {
857 changed = false
858 }
859
860 // It is computed if we have no old value, no new value, the schema
861 // says it is computed, and it didn't exist in the state before. The
862 // last point means: if it existed in the state, even empty, then it
863 // has already been computed.
864 computed := oldLen == 0 && newLen == 0 && schema.Computed && !stateExists
865
866 // If the count has changed or we're computed, then add a diff for the
867 // count. "nComputed" means that the new value _contains_ a value that
868 // is computed. We don't do granular diffs for this yet, so we mark the
869 // whole map as computed.
870 if changed || computed || nComputed {
871 countSchema := &Schema{
872 Type: TypeInt,
873 Computed: schema.Computed || nComputed,
874 ForceNew: schema.ForceNew,
875 }
876
877 oldStr := strconv.FormatInt(int64(oldLen), 10)
878 newStr := ""
879 if !computed && !nComputed {
880 newStr = strconv.FormatInt(int64(newLen), 10)
881 } else {
882 oldStr = ""
883 }
884
885 diff.Attributes[k+".%"] = countSchema.finalizeDiff(
886 &terraform.ResourceAttrDiff{
887 Old: oldStr,
888 New: newStr,
889 },
890 )
891 }
892
893 // If the new map is nil and we're computed, then ignore it.
894 if n == nil && schema.Computed {
895 return nil
896 }
897
898 // Now we compare, preferring values from the config map
899 for k, v := range configMap {
900 old, ok := stateMap[k]
901 delete(stateMap, k)
902
903 if old == v && ok && !all {
904 continue
905 }
906
907 diff.Attributes[prefix+k] = schema.finalizeDiff(&terraform.ResourceAttrDiff{
908 Old: old,
909 New: v,
910 })
911 }
912 for k, v := range stateMap {
913 diff.Attributes[prefix+k] = schema.finalizeDiff(&terraform.ResourceAttrDiff{
914 Old: v,
915 NewRemoved: true,
916 })
917 }
918
919 return nil
920}
921
922func (m schemaMap) diffSet(
923 k string,
924 schema *Schema,
925 diff *terraform.InstanceDiff,
926 d *ResourceData,
927 all bool) error {
928
929 o, n, _, computedSet := d.diffChange(k)
930 if computedSet {
931 n = nil
932 }
933 nSet := n != nil
934
935	// If we have an old value, no new value is set and none will be
936	// computed once all variables can be interpolated, and the schema
937	// is Computed, then nothing has changed.
938 if o != nil && n == nil && !computedSet && schema.Computed {
939 return nil
940 }
941
942 if o == nil {
943 o = schema.ZeroValue().(*Set)
944 }
945 if n == nil {
946 n = schema.ZeroValue().(*Set)
947 }
948 os := o.(*Set)
949 ns := n.(*Set)
950
951	// If the new value was set, compare the listCodes to determine if
952	// the two are equal. Comparing listCodes instead of the actual values
953 // is needed because there could be computed values in the set which
954 // would result in false positives while comparing.
955 if !all && nSet && reflect.DeepEqual(os.listCode(), ns.listCode()) {
956 return nil
957 }
958
959 // Get the counts
960 oldLen := os.Len()
961 newLen := ns.Len()
962 oldStr := strconv.Itoa(oldLen)
963 newStr := strconv.Itoa(newLen)
964
965 // Build a schema for our count
966 countSchema := &Schema{
967 Type: TypeInt,
968 Computed: schema.Computed,
969 ForceNew: schema.ForceNew,
970 }
971
972	// If the set is computed then say that the # is computed
973 if computedSet || schema.Computed && !nSet {
974 // If # already exists, equals 0 and no new set is supplied, there
975 // is nothing to record in the diff
976 count, ok := d.GetOk(k + ".#")
977 if ok && count.(int) == 0 && !nSet && !computedSet {
978 return nil
979 }
980
981 // Set the count but make sure that if # does not exist, we don't
982 // use the zeroed value
983 countStr := strconv.Itoa(count.(int))
984 if !ok {
985 countStr = ""
986 }
987
988 diff.Attributes[k+".#"] = countSchema.finalizeDiff(&terraform.ResourceAttrDiff{
989 Old: countStr,
990 NewComputed: true,
991 })
992 return nil
993 }
994
995 // If the counts are not the same, then record that diff
996 changed := oldLen != newLen
997 if changed || all {
998 diff.Attributes[k+".#"] = countSchema.finalizeDiff(&terraform.ResourceAttrDiff{
999 Old: oldStr,
1000 New: newStr,
1001 })
1002 }
1003
1004 // Build the list of codes that will make up our set. This is the
1005	// codes removed from the old set as well as all codes in the new set.
1006 codes := make([][]string, 2)
1007 codes[0] = os.Difference(ns).listCode()
1008 codes[1] = ns.listCode()
1009 for _, list := range codes {
1010 for _, code := range list {
1011 switch t := schema.Elem.(type) {
1012 case *Resource:
1013 // This is a complex resource
1014 for k2, schema := range t.Schema {
1015 subK := fmt.Sprintf("%s.%s.%s", k, code, k2)
1016 err := m.diff(subK, schema, diff, d, true)
1017 if err != nil {
1018 return err
1019 }
1020 }
1021 case *Schema:
1022 // Copy the schema so that we can set Computed/ForceNew from
1023 // the parent schema (the TypeSet).
1024 t2 := *t
1025 t2.ForceNew = schema.ForceNew
1026
1027 // This is just a primitive element, so go through each and
1028 // just diff each.
1029 subK := fmt.Sprintf("%s.%s", k, code)
1030 err := m.diff(subK, &t2, diff, d, true)
1031 if err != nil {
1032 return err
1033 }
1034 default:
1035 return fmt.Errorf("%s: unknown element type (internal)", k)
1036 }
1037 }
1038 }
1039
1040 return nil
1041}
1042
1043func (m schemaMap) diffString(
1044 k string,
1045 schema *Schema,
1046 diff *terraform.InstanceDiff,
1047 d *ResourceData,
1048 all bool) error {
1049 var originalN interface{}
1050 var os, ns string
1051 o, n, _, computed := d.diffChange(k)
1052 if schema.StateFunc != nil && n != nil {
1053 originalN = n
1054 n = schema.StateFunc(n)
1055 }
1056 nraw := n
1057 if nraw == nil && o != nil {
1058 nraw = schema.Type.Zero()
1059 }
1060 if err := mapstructure.WeakDecode(o, &os); err != nil {
1061 return fmt.Errorf("%s: %s", k, err)
1062 }
1063 if err := mapstructure.WeakDecode(nraw, &ns); err != nil {
1064 return fmt.Errorf("%s: %s", k, err)
1065 }
1066
1067 if os == ns && !all {
1068	// They're the same value. If the old value is not blank or we
1069	// have an ID, then return right away since we're already set up.
1070 if os != "" || d.Id() != "" {
1071 return nil
1072 }
1073
1074 // Otherwise, only continue if we're computed
1075 if !schema.Computed && !computed {
1076 return nil
1077 }
1078 }
1079
1080 removed := false
1081 if o != nil && n == nil {
1082 removed = true
1083 }
1084 if removed && schema.Computed {
1085 return nil
1086 }
1087
1088 diff.Attributes[k] = schema.finalizeDiff(&terraform.ResourceAttrDiff{
1089 Old: os,
1090 New: ns,
1091 NewExtra: originalN,
1092 NewRemoved: removed,
1093 NewComputed: computed,
1094 })
1095
1096 return nil
1097}
1098
1099func (m schemaMap) inputString(
1100 input terraform.UIInput,
1101 k string,
1102 schema *Schema) (interface{}, error) {
1103 result, err := input.Input(&terraform.InputOpts{
1104 Id: k,
1105 Query: k,
1106 Description: schema.Description,
1107 Default: schema.InputDefault,
1108 })
1109
1110 return result, err
1111}
1112
1113func (m schemaMap) validate(
1114 k string,
1115 schema *Schema,
1116 c *terraform.ResourceConfig) ([]string, []error) {
1117 raw, ok := c.Get(k)
1118 if !ok && schema.DefaultFunc != nil {
1119 // We have a dynamic default. Check if we have a value.
1120 var err error
1121 raw, err = schema.DefaultFunc()
1122 if err != nil {
1123 return nil, []error{fmt.Errorf(
1124 "%q, error loading default: %s", k, err)}
1125 }
1126
1127 // We're okay as long as we had a value set
1128 ok = raw != nil
1129 }
1130 if !ok {
1131 if schema.Required {
1132 return nil, []error{fmt.Errorf(
1133 "%q: required field is not set", k)}
1134 }
1135
1136 return nil, nil
1137 }
1138
1139 if !schema.Required && !schema.Optional {
1140 // This is a computed-only field
1141 return nil, []error{fmt.Errorf(
1142 "%q: this field cannot be set", k)}
1143 }
1144
1145 err := m.validateConflictingAttributes(k, schema, c)
1146 if err != nil {
1147 return nil, []error{err}
1148 }
1149
1150 return m.validateType(k, raw, schema, c)
1151}
1152
1153func (m schemaMap) validateConflictingAttributes(
1154 k string,
1155 schema *Schema,
1156 c *terraform.ResourceConfig) error {
1157
1158 if len(schema.ConflictsWith) == 0 {
1159 return nil
1160 }
1161
1162 for _, conflicting_key := range schema.ConflictsWith {
1163 if value, ok := c.Get(conflicting_key); ok {
1164 return fmt.Errorf(
1165 "%q: conflicts with %s (%#v)", k, conflicting_key, value)
1166 }
1167 }
1168
1169 return nil
1170}
1171
1172func (m schemaMap) validateList(
1173 k string,
1174 raw interface{},
1175 schema *Schema,
1176 c *terraform.ResourceConfig) ([]string, []error) {
1177 // We use reflection to verify the slice because you can't
1178	// cast to []interface{} unless the slice is exactly that type.
1179 rawV := reflect.ValueOf(raw)
1180
1181 // If we support promotion and the raw value isn't a slice, wrap
1182 // it in []interface{} and check again.
1183 if schema.PromoteSingle && rawV.Kind() != reflect.Slice {
1184 raw = []interface{}{raw}
1185 rawV = reflect.ValueOf(raw)
1186 }
1187
1188 if rawV.Kind() != reflect.Slice {
1189 return nil, []error{fmt.Errorf(
1190 "%s: should be a list", k)}
1191 }
1192
1193 // Validate length
1194 if schema.MaxItems > 0 && rawV.Len() > schema.MaxItems {
1195 return nil, []error{fmt.Errorf(
1196 "%s: attribute supports %d item maximum, config has %d declared", k, schema.MaxItems, rawV.Len())}
1197 }
1198
1199 if schema.MinItems > 0 && rawV.Len() < schema.MinItems {
1200 return nil, []error{fmt.Errorf(
1201 "%s: attribute supports %d item as a minimum, config has %d declared", k, schema.MinItems, rawV.Len())}
1202 }
1203
1204 // Now build the []interface{}
1205 raws := make([]interface{}, rawV.Len())
1206	for i := range raws {
1207 raws[i] = rawV.Index(i).Interface()
1208 }
1209
1210 var ws []string
1211 var es []error
1212 for i, raw := range raws {
1213 key := fmt.Sprintf("%s.%d", k, i)
1214
1215 // Reify the key value from the ResourceConfig.
1216 // If the list was computed we have all raw values, but some of these
1217 // may be known in the config, and aren't individually marked as Computed.
1218 if r, ok := c.Get(key); ok {
1219 raw = r
1220 }
1221
1222 var ws2 []string
1223 var es2 []error
1224 switch t := schema.Elem.(type) {
1225 case *Resource:
1226 // This is a sub-resource
1227 ws2, es2 = m.validateObject(key, t.Schema, c)
1228 case *Schema:
1229 ws2, es2 = m.validateType(key, raw, t, c)
1230 }
1231
1232 if len(ws2) > 0 {
1233 ws = append(ws, ws2...)
1234 }
1235 if len(es2) > 0 {
1236 es = append(es, es2...)
1237 }
1238 }
1239
1240 return ws, es
1241}
1242
1243func (m schemaMap) validateMap(
1244 k string,
1245 raw interface{},
1246 schema *Schema,
1247 c *terraform.ResourceConfig) ([]string, []error) {
1248 // We use reflection to verify the slice because you can't
1249	// cast to []interface{} unless the slice is exactly that type.
1250 rawV := reflect.ValueOf(raw)
1251 switch rawV.Kind() {
1252 case reflect.String:
1253 // If raw and reified are equal, this is a string and should
1254 // be rejected.
1255 reified, reifiedOk := c.Get(k)
1256 if reifiedOk && raw == reified && !c.IsComputed(k) {
1257 return nil, []error{fmt.Errorf("%s: should be a map", k)}
1258 }
1259 // Otherwise it's likely raw is an interpolation.
1260 return nil, nil
1261 case reflect.Map:
1262 case reflect.Slice:
1263 default:
1264 return nil, []error{fmt.Errorf("%s: should be a map", k)}
1265 }
1266
1267 // If it is not a slice, validate directly
1268 if rawV.Kind() != reflect.Slice {
1269 mapIface := rawV.Interface()
1270 if _, errs := validateMapValues(k, mapIface.(map[string]interface{}), schema); len(errs) > 0 {
1271 return nil, errs
1272 }
1273 if schema.ValidateFunc != nil {
1274 return schema.ValidateFunc(mapIface, k)
1275 }
1276 return nil, nil
1277 }
1278
1279 // It is a slice, verify that all the elements are maps
1280 raws := make([]interface{}, rawV.Len())
1281	for i := range raws {
1282 raws[i] = rawV.Index(i).Interface()
1283 }
1284
1285 for _, raw := range raws {
1286 v := reflect.ValueOf(raw)
1287 if v.Kind() != reflect.Map {
1288 return nil, []error{fmt.Errorf(
1289 "%s: should be a map", k)}
1290 }
1291 mapIface := v.Interface()
1292 if _, errs := validateMapValues(k, mapIface.(map[string]interface{}), schema); len(errs) > 0 {
1293 return nil, errs
1294 }
1295 }
1296
1297 if schema.ValidateFunc != nil {
1298 validatableMap := make(map[string]interface{})
1299 for _, raw := range raws {
1300 for k, v := range raw.(map[string]interface{}) {
1301 validatableMap[k] = v
1302 }
1303 }
1304
1305 return schema.ValidateFunc(validatableMap, k)
1306 }
1307
1308 return nil, nil
1309}
1310
1311func validateMapValues(k string, m map[string]interface{}, schema *Schema) ([]string, []error) {
1312 for key, raw := range m {
1313 valueType, err := getValueType(k, schema)
1314 if err != nil {
1315 return nil, []error{err}
1316 }
1317
1318 switch valueType {
1319 case TypeBool:
1320 var n bool
1321 if err := mapstructure.WeakDecode(raw, &n); err != nil {
1322 return nil, []error{fmt.Errorf("%s (%s): %s", k, key, err)}
1323 }
1324 case TypeInt:
1325 var n int
1326 if err := mapstructure.WeakDecode(raw, &n); err != nil {
1327 return nil, []error{fmt.Errorf("%s (%s): %s", k, key, err)}
1328 }
1329 case TypeFloat:
1330 var n float64
1331 if err := mapstructure.WeakDecode(raw, &n); err != nil {
1332 return nil, []error{fmt.Errorf("%s (%s): %s", k, key, err)}
1333 }
1334 case TypeString:
1335 var n string
1336 if err := mapstructure.WeakDecode(raw, &n); err != nil {
1337 return nil, []error{fmt.Errorf("%s (%s): %s", k, key, err)}
1338 }
1339 default:
1340 panic(fmt.Sprintf("Unknown validation type: %#v", schema.Type))
1341 }
1342 }
1343 return nil, nil
1344}
1345
1346func getValueType(k string, schema *Schema) (ValueType, error) {
1347 if schema.Elem == nil {
1348 return TypeString, nil
1349 }
1350 if vt, ok := schema.Elem.(ValueType); ok {
1351 return vt, nil
1352 }
1353
1354 if s, ok := schema.Elem.(*Schema); ok {
1355 if s.Elem == nil {
1356 return TypeString, nil
1357 }
1358 if vt, ok := s.Elem.(ValueType); ok {
1359 return vt, nil
1360 }
1361 }
1362
1363 if _, ok := schema.Elem.(*Resource); ok {
1364 // TODO: We don't actually support this (yet)
1365 // but silently pass the validation, until we decide
1366 // how to handle nested structures in maps
1367 return TypeString, nil
1368 }
1369 return 0, fmt.Errorf("%s: unexpected map value type: %#v", k, schema.Elem)
1370}
1371
1372func (m schemaMap) validateObject(
1373 k string,
1374 schema map[string]*Schema,
1375 c *terraform.ResourceConfig) ([]string, []error) {
1376 raw, _ := c.GetRaw(k)
1377 if _, ok := raw.(map[string]interface{}); !ok {
1378 return nil, []error{fmt.Errorf(
1379 "%s: expected object, got %s",
1380 k, reflect.ValueOf(raw).Kind())}
1381 }
1382
1383 var ws []string
1384 var es []error
1385 for subK, s := range schema {
1386 key := subK
1387 if k != "" {
1388 key = fmt.Sprintf("%s.%s", k, subK)
1389 }
1390
1391 ws2, es2 := m.validate(key, s, c)
1392 if len(ws2) > 0 {
1393 ws = append(ws, ws2...)
1394 }
1395 if len(es2) > 0 {
1396 es = append(es, es2...)
1397 }
1398 }
1399
1400 // Detect any extra/unknown keys and report those as errors.
1401 if m, ok := raw.(map[string]interface{}); ok {
1402	for subk := range m {
1403 if _, ok := schema[subk]; !ok {
1404 if subk == TimeoutsConfigKey {
1405 continue
1406 }
1407 es = append(es, fmt.Errorf(
1408 "%s: invalid or unknown key: %s", k, subk))
1409 }
1410 }
1411 }
1412
1413 return ws, es
1414}
1415
1416func (m schemaMap) validatePrimitive(
1417 k string,
1418 raw interface{},
1419 schema *Schema,
1420 c *terraform.ResourceConfig) ([]string, []error) {
1421
1422 // Catch if the user gave a complex type where a primitive was
1423 // expected, so we can return a friendly error message that
1424 // doesn't contain Go type system terminology.
1425 switch reflect.ValueOf(raw).Type().Kind() {
1426 case reflect.Slice:
1427 return nil, []error{
1428 fmt.Errorf("%s must be a single value, not a list", k),
1429 }
1430 case reflect.Map:
1431 return nil, []error{
1432 fmt.Errorf("%s must be a single value, not a map", k),
1433 }
1434 default: // ok
1435 }
1436
1437 if c.IsComputed(k) {
1438 // If the key is being computed, then it is not an error as
1439 // long as it's not a slice or map.
1440 return nil, nil
1441 }
1442
1443 var decoded interface{}
1444 switch schema.Type {
1445 case TypeBool:
1446 // Verify that we can parse this as the correct type
1447 var n bool
1448 if err := mapstructure.WeakDecode(raw, &n); err != nil {
1449 return nil, []error{fmt.Errorf("%s: %s", k, err)}
1450 }
1451 decoded = n
1452 case TypeInt:
1453 // Verify that we can parse this as an int
1454 var n int
1455 if err := mapstructure.WeakDecode(raw, &n); err != nil {
1456 return nil, []error{fmt.Errorf("%s: %s", k, err)}
1457 }
1458 decoded = n
1459 case TypeFloat:
1460	// Verify that we can parse this as a float
1461 var n float64
1462 if err := mapstructure.WeakDecode(raw, &n); err != nil {
1463 return nil, []error{fmt.Errorf("%s: %s", k, err)}
1464 }
1465 decoded = n
1466 case TypeString:
1467 // Verify that we can parse this as a string
1468 var n string
1469 if err := mapstructure.WeakDecode(raw, &n); err != nil {
1470 return nil, []error{fmt.Errorf("%s: %s", k, err)}
1471 }
1472 decoded = n
1473 default:
1474 panic(fmt.Sprintf("Unknown validation type: %#v", schema.Type))
1475 }
1476
1477 if schema.ValidateFunc != nil {
1478 return schema.ValidateFunc(decoded, k)
1479 }
1480
1481 return nil, nil
1482}
1483
1484func (m schemaMap) validateType(
1485 k string,
1486 raw interface{},
1487 schema *Schema,
1488 c *terraform.ResourceConfig) ([]string, []error) {
1489 var ws []string
1490 var es []error
1491 switch schema.Type {
1492 case TypeSet, TypeList:
1493 ws, es = m.validateList(k, raw, schema, c)
1494 case TypeMap:
1495 ws, es = m.validateMap(k, raw, schema, c)
1496 default:
1497 ws, es = m.validatePrimitive(k, raw, schema, c)
1498 }
1499
1500 if schema.Deprecated != "" {
1501 ws = append(ws, fmt.Sprintf(
1502 "%q: [DEPRECATED] %s", k, schema.Deprecated))
1503 }
1504
1505 if schema.Removed != "" {
1506 es = append(es, fmt.Errorf(
1507 "%q: [REMOVED] %s", k, schema.Removed))
1508 }
1509
1510 return ws, es
1511}
1512
1513// Zero returns the zero value for a type.
1514func (t ValueType) Zero() interface{} {
1515 switch t {
1516 case TypeInvalid:
1517 return nil
1518 case TypeBool:
1519 return false
1520 case TypeInt:
1521 return 0
1522 case TypeFloat:
1523 return 0.0
1524 case TypeString:
1525 return ""
1526 case TypeList:
1527 return []interface{}{}
1528 case TypeMap:
1529 return map[string]interface{}{}
1530 case TypeSet:
1531 return new(Set)
1532 case typeObject:
1533 return map[string]interface{}{}
1534 default:
1535 panic(fmt.Sprintf("unknown type %s", t))
1536 }
1537}
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/serialize.go b/vendor/github.com/hashicorp/terraform/helper/schema/serialize.go
new file mode 100644
index 0000000..fe6d750
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/serialize.go
@@ -0,0 +1,125 @@
1package schema
2
3import (
4 "bytes"
5 "fmt"
6 "sort"
7 "strconv"
8)
9
10func SerializeValueForHash(buf *bytes.Buffer, val interface{}, schema *Schema) {
11 if val == nil {
12 buf.WriteRune(';')
13 return
14 }
15
16 switch schema.Type {
17 case TypeBool:
18 if val.(bool) {
19 buf.WriteRune('1')
20 } else {
21 buf.WriteRune('0')
22 }
23 case TypeInt:
24 buf.WriteString(strconv.Itoa(val.(int)))
25 case TypeFloat:
26 buf.WriteString(strconv.FormatFloat(val.(float64), 'g', -1, 64))
27 case TypeString:
28 buf.WriteString(val.(string))
29 case TypeList:
30 buf.WriteRune('(')
31 l := val.([]interface{})
32 for _, innerVal := range l {
33 serializeCollectionMemberForHash(buf, innerVal, schema.Elem)
34 }
35 buf.WriteRune(')')
36 case TypeMap:
37
38 m := val.(map[string]interface{})
39 var keys []string
40 for k := range m {
41 keys = append(keys, k)
42 }
43 sort.Strings(keys)
44 buf.WriteRune('[')
45 for _, k := range keys {
46 innerVal := m[k]
47 if innerVal == nil {
48 continue
49 }
50 buf.WriteString(k)
51 buf.WriteRune(':')
52
53 switch innerVal := innerVal.(type) {
54 case int:
55 buf.WriteString(strconv.Itoa(innerVal))
56 case float64:
57 buf.WriteString(strconv.FormatFloat(innerVal, 'g', -1, 64))
58 case string:
59 buf.WriteString(innerVal)
60 default:
61 panic(fmt.Sprintf("unknown value type in TypeMap %T", innerVal))
62 }
63
64 buf.WriteRune(';')
65 }
66 buf.WriteRune(']')
67 case TypeSet:
68 buf.WriteRune('{')
69 s := val.(*Set)
70 for _, innerVal := range s.List() {
71 serializeCollectionMemberForHash(buf, innerVal, schema.Elem)
72 }
73 buf.WriteRune('}')
74 default:
75 panic("unknown schema type to serialize")
76 }
77 buf.WriteRune(';')
78}
79
80// SerializeResourceForHash appends a serialization of the given resource config
81// to the given buffer, guaranteeing deterministic results given the same value
82// and schema.
83//
84// Its primary purpose is as input into a hashing function in order
85// to hash complex substructures when used in sets, and so the serialization
86// is not reversible.
87func SerializeResourceForHash(buf *bytes.Buffer, val interface{}, resource *Resource) {
88 if val == nil {
89 return
90 }
91 sm := resource.Schema
92 m := val.(map[string]interface{})
93 var keys []string
94 for k := range sm {
95 keys = append(keys, k)
96 }
97 sort.Strings(keys)
98 for _, k := range keys {
99 innerSchema := sm[k]
100 // Skip attributes that are not user-provided. Computed attributes
101 // do not contribute to the hash since their ultimate value cannot
102 // be known at plan/diff time.
103 if !(innerSchema.Required || innerSchema.Optional) {
104 continue
105 }
106
107 buf.WriteString(k)
108 buf.WriteRune(':')
109 innerVal := m[k]
110 SerializeValueForHash(buf, innerVal, innerSchema)
111 }
112}
113
114func serializeCollectionMemberForHash(buf *bytes.Buffer, val interface{}, elem interface{}) {
115 switch tElem := elem.(type) {
116 case *Schema:
117 SerializeValueForHash(buf, val, tElem)
118 case *Resource:
119 buf.WriteRune('<')
120 SerializeResourceForHash(buf, val, tElem)
121 buf.WriteString(">;")
122 default:
123 panic(fmt.Sprintf("invalid element type: %T", tElem))
124 }
125}
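A quick illustration of the determinism guarantee (a sketch assuming the usual bytes/fmt imports, not part of the vendored file): TypeMap keys are sorted before writing, so equal maps always serialize to the same bytes.

	var buf bytes.Buffer
	s := &schema.Schema{Type: schema.TypeMap}
	schema.SerializeValueForHash(&buf, map[string]interface{}{
		"b": "2",
		"a": "1",
	}, s)
	fmt.Println(buf.String()) // [a:1;b:2;];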
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/set.go b/vendor/github.com/hashicorp/terraform/helper/schema/set.go
new file mode 100644
index 0000000..de05f40
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/set.go
@@ -0,0 +1,209 @@
1package schema
2
3import (
4 "bytes"
5 "fmt"
6 "reflect"
7 "sort"
8 "strconv"
9 "sync"
10
11 "github.com/hashicorp/terraform/helper/hashcode"
12)
13
14// HashString hashes strings. If you want a Set of strings, this is the
15// SchemaSetFunc you want.
16func HashString(v interface{}) int {
17 return hashcode.String(v.(string))
18}
19
20// HashResource hashes complex structures that are described using
21// a *Resource. This is the default set implementation used when a set's
22// element type is a full resource.
23func HashResource(resource *Resource) SchemaSetFunc {
24 return func(v interface{}) int {
25 var buf bytes.Buffer
26 SerializeResourceForHash(&buf, v, resource)
27 return hashcode.String(buf.String())
28 }
29}
30
31// HashSchema hashes values that are described using a *Schema. This is the
32// default set implementation used when a set's element type is a single
33// schema.
34func HashSchema(schema *Schema) SchemaSetFunc {
35 return func(v interface{}) int {
36 var buf bytes.Buffer
37 SerializeValueForHash(&buf, v, schema)
38 return hashcode.String(buf.String())
39 }
40}
41
42// Set is a set data structure that is returned for elements of type
43// TypeSet.
44type Set struct {
45 F SchemaSetFunc
46
47 m map[string]interface{}
48 once sync.Once
49}
50
51// NewSet is a convenience method for creating a new set with the given
52// items.
53func NewSet(f SchemaSetFunc, items []interface{}) *Set {
54 s := &Set{F: f}
55 for _, i := range items {
56 s.Add(i)
57 }
58
59 return s
60}
61
62// CopySet returns a copy of another set.
63func CopySet(otherSet *Set) *Set {
64 return NewSet(otherSet.F, otherSet.List())
65}
66
67// Add adds an item to the set if it isn't already in the set.
68func (s *Set) Add(item interface{}) {
69 s.add(item, false)
70}
71
72// Remove removes an item if it's already in the set. Idempotent.
73func (s *Set) Remove(item interface{}) {
74 s.remove(item)
75}
76
77// Contains checks if the set has the given item.
78func (s *Set) Contains(item interface{}) bool {
79 _, ok := s.m[s.hash(item)]
80 return ok
81}
82
83// Len returns the number of items in the set.
84func (s *Set) Len() int {
85 return len(s.m)
86}
87
88// List returns the elements of this set in slice format.
89//
90// The order of the returned elements is deterministic. Given the same
91// set, the order of this will always be the same.
92func (s *Set) List() []interface{} {
93 result := make([]interface{}, len(s.m))
94 for i, k := range s.listCode() {
95 result[i] = s.m[k]
96 }
97
98 return result
99}
100
101// Difference performs a set difference of the two sets, returning
102// a new third set that has only the elements unique to this set.
103func (s *Set) Difference(other *Set) *Set {
104 result := &Set{F: s.F}
105 result.once.Do(result.init)
106
107 for k, v := range s.m {
108 if _, ok := other.m[k]; !ok {
109 result.m[k] = v
110 }
111 }
112
113 return result
114}
115
116// Intersection performs the set intersection of the two sets
117// and returns a new third set.
118func (s *Set) Intersection(other *Set) *Set {
119 result := &Set{F: s.F}
120 result.once.Do(result.init)
121
122 for k, v := range s.m {
123 if _, ok := other.m[k]; ok {
124 result.m[k] = v
125 }
126 }
127
128 return result
129}
130
131// Union performs the set union of the two sets and returns a new third
132// set.
133func (s *Set) Union(other *Set) *Set {
134 result := &Set{F: s.F}
135 result.once.Do(result.init)
136
137 for k, v := range s.m {
138 result.m[k] = v
139 }
140 for k, v := range other.m {
141 result.m[k] = v
142 }
143
144 return result
145}
146
147func (s *Set) Equal(raw interface{}) bool {
148 other, ok := raw.(*Set)
149 if !ok {
150 return false
151 }
152
153 return reflect.DeepEqual(s.m, other.m)
154}
155
156func (s *Set) GoString() string {
157 return fmt.Sprintf("*Set(%#v)", s.m)
158}
159
160func (s *Set) init() {
161 s.m = make(map[string]interface{})
162}
163
164func (s *Set) add(item interface{}, computed bool) string {
165 s.once.Do(s.init)
166
167 code := s.hash(item)
168 if computed {
169 code = "~" + code
170 }
171
172 if _, ok := s.m[code]; !ok {
173 s.m[code] = item
174 }
175
176 return code
177}
178
179func (s *Set) hash(item interface{}) string {
180 code := s.F(item)
181 // Always return a nonnegative hashcode.
182 if code < 0 {
183 code = -code
184 }
185 return strconv.Itoa(code)
186}
187
188func (s *Set) remove(item interface{}) string {
189 s.once.Do(s.init)
190
191 code := s.hash(item)
192 delete(s.m, code)
193
194 return code
195}
196
197func (s *Set) index(item interface{}) int {
198 return sort.SearchStrings(s.listCode(), s.hash(item))
199}
200
201func (s *Set) listCode() []string {
202 // Sort the hash codes so the order of the list is deterministic
203 keys := make([]string, 0, len(s.m))
204 for k := range s.m {
205 keys = append(keys, k)
206 }
207	sort.Strings(keys)
208 return keys
209}
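A short usage sketch for Set (illustrative only): HashString supplies the SchemaSetFunc, and the algebra methods return new sets.

	a := schema.NewSet(schema.HashString, []interface{}{"x", "y"})
	b := schema.NewSet(schema.HashString, []interface{}{"y", "z"})

	fmt.Println(a.Union(b).Len())       // 3
	fmt.Println(a.Difference(b).List()) // [x] (elements only in a)
	fmt.Println(a.Contains("y"))        // true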
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/testing.go b/vendor/github.com/hashicorp/terraform/helper/schema/testing.go
new file mode 100644
index 0000000..9765bdb
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/testing.go
@@ -0,0 +1,30 @@
1package schema
2
3import (
4 "testing"
5
6 "github.com/hashicorp/terraform/config"
7 "github.com/hashicorp/terraform/terraform"
8)
9
10// TestResourceDataRaw creates a ResourceData from a raw configuration map.
11func TestResourceDataRaw(
12 t *testing.T, schema map[string]*Schema, raw map[string]interface{}) *ResourceData {
13 c, err := config.NewRawConfig(raw)
14 if err != nil {
15 t.Fatalf("err: %s", err)
16 }
17
18 sm := schemaMap(schema)
19 diff, err := sm.Diff(nil, terraform.NewResourceConfig(c))
20 if err != nil {
21 t.Fatalf("err: %s", err)
22 }
23
24 result, err := sm.Data(nil, diff)
25 if err != nil {
26 t.Fatalf("err: %s", err)
27 }
28
29 return result
30}
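A hypothetical test built on the helper above; the schema and raw config are invented for illustration.

	func TestNameAttribute(t *testing.T) {
		s := map[string]*schema.Schema{
			"name": {Type: schema.TypeString, Optional: true},
		}
		d := schema.TestResourceDataRaw(t, s, map[string]interface{}{
			"name": "demo",
		})
		if got := d.Get("name").(string); got != "demo" {
			t.Fatalf("unexpected name: %q", got)
		}
	}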
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/valuetype.go b/vendor/github.com/hashicorp/terraform/helper/schema/valuetype.go
new file mode 100644
index 0000000..9286987
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/valuetype.go
@@ -0,0 +1,21 @@
1package schema
2
3//go:generate stringer -type=ValueType valuetype.go
4
5// ValueType is an enum of the type that can be represented by a schema.
6type ValueType int
7
8const (
9 TypeInvalid ValueType = iota
10 TypeBool
11 TypeInt
12 TypeFloat
13 TypeString
14 TypeList
15 TypeMap
16 TypeSet
17 typeObject
18)
19
20// NOTE: ValueType has more functions defined on it in schema.go. We can't
21// put them here because we reference other files.
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/valuetype_string.go b/vendor/github.com/hashicorp/terraform/helper/schema/valuetype_string.go
new file mode 100644
index 0000000..1610cec
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/valuetype_string.go
@@ -0,0 +1,16 @@
1// Code generated by "stringer -type=ValueType valuetype.go"; DO NOT EDIT.
2
3package schema
4
5import "fmt"
6
7const _ValueType_name = "TypeInvalidTypeBoolTypeIntTypeFloatTypeStringTypeListTypeMapTypeSettypeObject"
8
9var _ValueType_index = [...]uint8{0, 11, 19, 26, 35, 45, 53, 60, 67, 77}
10
11func (i ValueType) String() string {
12 if i < 0 || i >= ValueType(len(_ValueType_index)-1) {
13 return fmt.Sprintf("ValueType(%d)", i)
14 }
15 return _ValueType_name[_ValueType_index[i]:_ValueType_index[i+1]]
16}
diff --git a/vendor/github.com/hashicorp/terraform/helper/shadow/closer.go b/vendor/github.com/hashicorp/terraform/helper/shadow/closer.go
new file mode 100644
index 0000000..7edd5e7
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/shadow/closer.go
@@ -0,0 +1,80 @@
1package shadow
2
3import (
4 "fmt"
5 "io"
6 "reflect"
7
8 "github.com/hashicorp/go-multierror"
9 "github.com/mitchellh/reflectwalk"
10)
11
12// Close will close all shadow values within the given structure.
13//
14// This uses reflection to walk the structure, find all shadow elements,
15// and close them. Currently this will only find struct fields that are
16// shadow values, and not slice elements, etc.
17func Close(v interface{}) error {
18 // We require a pointer so we can address the internal fields
19 val := reflect.ValueOf(v)
20 if val.Kind() != reflect.Ptr {
21 return fmt.Errorf("value must be a pointer")
22 }
23
24 // Walk and close
25 var w closeWalker
26 if err := reflectwalk.Walk(v, &w); err != nil {
27 return err
28 }
29
30 return w.Err
31}
32
33type closeWalker struct {
34 Err error
35}
36
37func (w *closeWalker) Struct(reflect.Value) error {
38 // Do nothing. We implement this for reflectwalk.StructWalker
39 return nil
40}
41
42func (w *closeWalker) StructField(f reflect.StructField, v reflect.Value) error {
43	// Not sure why this would be, but let's avoid some panics
44 if !v.IsValid() {
45 return nil
46 }
47
48	// PkgPath is empty for exported fields, so skip unexported ones
49 if f.PkgPath != "" {
50 return nil
51 }
52
53 // Verify the io.Closer is in this package
54 typ := v.Type()
55 if typ.PkgPath() != "github.com/hashicorp/terraform/helper/shadow" {
56 return nil
57 }
58
59 // We're looking for an io.Closer
60 raw := v.Interface()
61 if raw == nil {
62 return nil
63 }
64
65 closer, ok := raw.(io.Closer)
66 if !ok && v.CanAddr() {
67 closer, ok = v.Addr().Interface().(io.Closer)
68 }
69 if !ok {
70 return reflectwalk.SkipEntry
71 }
72
73 // Close it
74 if err := closer.Close(); err != nil {
75 w.Err = multierror.Append(w.Err, err)
76 }
77
78 // Don't go into the struct field
79 return reflectwalk.SkipEntry
80}
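Sketch of the intended call pattern (the struct type here is invented): exported fields holding shadow values get closed; unexported fields and foreign types are skipped.

	type shadowedOps struct {
		Result shadow.Value      // closed by Close
		ByKey  shadow.KeyedValue // closed by Close
	}

	func shutdown(ops *shadowedOps) error {
		return shadow.Close(ops) // must be a pointer, per the check above
	}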
diff --git a/vendor/github.com/hashicorp/terraform/helper/shadow/compared_value.go b/vendor/github.com/hashicorp/terraform/helper/shadow/compared_value.go
new file mode 100644
index 0000000..4223e92
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/shadow/compared_value.go
@@ -0,0 +1,128 @@
1package shadow
2
3import (
4 "sync"
5)
6
7// ComparedValue is a struct that finds a value by comparing some key
8// to the list of stored values. This is useful when there is no easy
9// uniquely identifying key that works in a map (for that, use KeyedValue).
10//
11// ComparedValue is very expensive, relative to other Value types. Try to
12// limit the number of values stored in a ComparedValue by potentially
13// nesting it within a KeyedValue (a keyed value points to a compared value,
14// for example).
15type ComparedValue struct {
16 // Func is a function that is given the lookup key and a single
17 // stored value. If it matches, it returns true.
18 Func func(k, v interface{}) bool
19
20 lock sync.Mutex
21 once sync.Once
22 closed bool
23 values []interface{}
24 waiters map[interface{}]*Value
25}
26
27// Close closes the value. This can never fail. For a definition of
28// "close" see the ErrClosed docs.
29func (w *ComparedValue) Close() error {
30 w.lock.Lock()
31 defer w.lock.Unlock()
32
33 // Set closed to true always
34 w.closed = true
35
36 // For all waiters, complete with ErrClosed
37 for k, val := range w.waiters {
38 val.SetValue(ErrClosed)
39 delete(w.waiters, k)
40 }
41
42 return nil
43}
44
45// Value returns the value that was set for the given key, or blocks
46// until one is available.
47func (w *ComparedValue) Value(k interface{}) interface{} {
48 v, val := w.valueWaiter(k)
49 if val == nil {
50 return v
51 }
52
53 return val.Value()
54}
55
56// ValueOk gets the value for the given key, returning immediately if the
57// value doesn't exist. The second return argument is true if the value exists.
58func (w *ComparedValue) ValueOk(k interface{}) (interface{}, bool) {
59 v, val := w.valueWaiter(k)
60 return v, val == nil
61}
62
63func (w *ComparedValue) SetValue(v interface{}) {
64 w.lock.Lock()
65 defer w.lock.Unlock()
66 w.once.Do(w.init)
67
68 // Check if we already have this exact value (by simply comparing
69 // with == directly). If we do, then we don't insert it again.
70 found := false
71 for _, v2 := range w.values {
72 if v == v2 {
73 found = true
74 break
75 }
76 }
77
78 if !found {
79 // Set the value, always
80 w.values = append(w.values, v)
81 }
82
83 // Go through the waiters
84 for k, val := range w.waiters {
85 if w.Func(k, v) {
86 val.SetValue(v)
87 delete(w.waiters, k)
88 }
89 }
90}
91
92func (w *ComparedValue) valueWaiter(k interface{}) (interface{}, *Value) {
93 w.lock.Lock()
94 w.once.Do(w.init)
95
96 // Look for a pre-existing value
97 for _, v := range w.values {
98 if w.Func(k, v) {
99 w.lock.Unlock()
100 return v, nil
101 }
102 }
103
104 // If we're closed, return that
105 if w.closed {
106 w.lock.Unlock()
107 return ErrClosed, nil
108 }
109
110 // Pre-existing value doesn't exist, create a waiter
111 val := w.waiters[k]
112 if val == nil {
113 val = new(Value)
114 w.waiters[k] = val
115 }
116 w.lock.Unlock()
117
118 // Return the waiter
119 return nil, val
120}
121
122// Must be called with w.lock held.
123func (w *ComparedValue) init() {
124 w.waiters = make(map[interface{}]*Value)
125 if w.Func == nil {
126 w.Func = func(k, v interface{}) bool { return k == v }
127 }
128}
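Hypothetical usage: the lookup key is matched against a field of each stored value, so callers can find a struct without knowing its identity.

	type user struct{ ID string }

	cv := &shadow.ComparedValue{
		Func: func(k, v interface{}) bool { return v.(*user).ID == k.(string) },
	}
	go cv.SetValue(&user{ID: "42"})
	u := cv.Value("42").(*user) // blocks until a matching value arrives
	fmt.Println(u.ID)           // 42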
diff --git a/vendor/github.com/hashicorp/terraform/helper/shadow/keyed_value.go b/vendor/github.com/hashicorp/terraform/helper/shadow/keyed_value.go
new file mode 100644
index 0000000..432b036
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/shadow/keyed_value.go
@@ -0,0 +1,151 @@
1package shadow
2
3import (
4 "sync"
5)
6
7// KeyedValue is a struct that coordinates a value by key. If a value is
8// not available for a given key, it'll block until it is available.
9type KeyedValue struct {
10 lock sync.Mutex
11 once sync.Once
12 values map[string]interface{}
13 waiters map[string]*Value
14 closed bool
15}
16
17// Close closes the value. This can never fail. For a definition of
18// "close" see the ErrClosed docs.
19func (w *KeyedValue) Close() error {
20 w.lock.Lock()
21 defer w.lock.Unlock()
22
23 // Set closed to true always
24 w.closed = true
25
26 // For all waiters, complete with ErrClosed
27 for k, val := range w.waiters {
28 val.SetValue(ErrClosed)
29 delete(w.waiters, k)
30 }
31
32 return nil
33}
34
35// Value returns the value that was set for the given key, or blocks
36// until one is available.
37func (w *KeyedValue) Value(k string) interface{} {
38 w.lock.Lock()
39 v, val := w.valueWaiter(k)
40 w.lock.Unlock()
41
42 // If we have no waiter, then return the value
43 if val == nil {
44 return v
45 }
46
47 // We have a waiter, so wait
48 return val.Value()
49}
50
51// WaitForChange waits for the value with the given key to be set again.
52// If the key isn't set, it'll wait for an initial value. Note that while
53// it is called "WaitForChange", the value isn't guaranteed to _change_;
54// this will return when a SetValue is called for the given k.
55func (w *KeyedValue) WaitForChange(k string) interface{} {
56 w.lock.Lock()
57 w.once.Do(w.init)
58
59 // If we're closed, we're closed
60 if w.closed {
61 w.lock.Unlock()
62 return ErrClosed
63 }
64
65	// Check for an active waiter. If there isn't one, create it
66 val := w.waiters[k]
67 if val == nil {
68 val = new(Value)
69 w.waiters[k] = val
70 }
71 w.lock.Unlock()
72
73 // And wait
74 return val.Value()
75}
76
77// ValueOk gets the value for the given key, returning immediately if the
78// value doesn't exist. The second return argument is true if the value exists.
79func (w *KeyedValue) ValueOk(k string) (interface{}, bool) {
80 w.lock.Lock()
81 defer w.lock.Unlock()
82
83 v, val := w.valueWaiter(k)
84 return v, val == nil
85}
86
87func (w *KeyedValue) SetValue(k string, v interface{}) {
88 w.lock.Lock()
89 defer w.lock.Unlock()
90 w.setValue(k, v)
91}
92
93// Init will initialize the key to a given value only if the key has
94// not been set before. This is safe to call multiple times and in parallel.
95func (w *KeyedValue) Init(k string, v interface{}) {
96 w.lock.Lock()
97 defer w.lock.Unlock()
98
99	// A non-nil waiter means no value has been set yet, so initialize it.
100 _, val := w.valueWaiter(k)
101 if val != nil {
102 w.setValue(k, v)
103 }
104}
105
106// Must be called with w.lock held.
107func (w *KeyedValue) init() {
108 w.values = make(map[string]interface{})
109 w.waiters = make(map[string]*Value)
110}
111
112// setValue is like SetValue but assumes the lock is held.
113func (w *KeyedValue) setValue(k string, v interface{}) {
114 w.once.Do(w.init)
115
116 // Set the value, always
117 w.values[k] = v
118
119 // If we have a waiter, set it
120 if val, ok := w.waiters[k]; ok {
121 val.SetValue(v)
122 delete(w.waiters, k)
123 }
124}
125
126// valueWaiter gets the value or the Value waiter for a given key.
127//
128// This must be called with lock held.
129func (w *KeyedValue) valueWaiter(k string) (interface{}, *Value) {
130 w.once.Do(w.init)
131
132 // If we have this value already, return it
133 if v, ok := w.values[k]; ok {
134 return v, nil
135 }
136
137 // If we're closed, return that
138 if w.closed {
139 return ErrClosed, nil
140 }
141
142 // No pending value, check for a waiter
143 val := w.waiters[k]
144 if val == nil {
145 val = new(Value)
146 w.waiters[k] = val
147 }
148
149 // Return the waiter
150 return nil, val
151}
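A small sketch contrasting the blocking and non-blocking accessors (illustrative; the zero value is ready to use).

	var kv shadow.KeyedValue

	go kv.SetValue("greeting", "hello") // producer
	fmt.Println(kv.Value("greeting"))   // blocks until set: hello

	if _, ok := kv.ValueOk("missing"); !ok {
		fmt.Println("no value yet") // returns immediately
	}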
diff --git a/vendor/github.com/hashicorp/terraform/helper/shadow/ordered_value.go b/vendor/github.com/hashicorp/terraform/helper/shadow/ordered_value.go
new file mode 100644
index 0000000..0a43d4d
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/shadow/ordered_value.go
@@ -0,0 +1,66 @@
1package shadow
2
3import (
4 "container/list"
5 "sync"
6)
7
8// OrderedValue is a struct that keeps track of values in the order
9// they are set. Each time Value() is called, it returns the earliest
10// pending value and then discards it.
11//
12// This is unlike Value that returns the same value once it is set.
13type OrderedValue struct {
14 lock sync.Mutex
15 values *list.List
16 waiters *list.List
17}
18
19// Value returns the next pending value in the order it was set, or
20// blocks until one is received.
21func (w *OrderedValue) Value() interface{} {
22 w.lock.Lock()
23
24 // If we have a pending value already, use it
25 if w.values != nil && w.values.Len() > 0 {
26 front := w.values.Front()
27 w.values.Remove(front)
28 w.lock.Unlock()
29 return front.Value
30 }
31
32 // No pending value, create a waiter
33 if w.waiters == nil {
34 w.waiters = list.New()
35 }
36
37 var val Value
38 w.waiters.PushBack(&val)
39 w.lock.Unlock()
40
41 // Return the value once we have it
42 return val.Value()
43}
44
45// SetValue records a value, handing it to the oldest waiter if one exists.
46func (w *OrderedValue) SetValue(v interface{}) {
47 w.lock.Lock()
48 defer w.lock.Unlock()
49
50 // If we have a waiter, notify it
51 if w.waiters != nil && w.waiters.Len() > 0 {
52 front := w.waiters.Front()
53 w.waiters.Remove(front)
54
55 val := front.Value.(*Value)
56 val.SetValue(v)
57 return
58 }
59
60 // Add it to the list of values
61 if w.values == nil {
62 w.values = list.New()
63 }
64
65 w.values.PushBack(v)
66}
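An illustrative sketch of the drain order: values come back out in the order they were set.

	var ov shadow.OrderedValue
	ov.SetValue(1)
	ov.SetValue(2)
	fmt.Println(ov.Value()) // 1
	fmt.Println(ov.Value()) // 2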
diff --git a/vendor/github.com/hashicorp/terraform/helper/shadow/value.go b/vendor/github.com/hashicorp/terraform/helper/shadow/value.go
new file mode 100644
index 0000000..2413335
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/shadow/value.go
@@ -0,0 +1,79 @@
1package shadow
2
3import (
4 "errors"
5 "sync"
6)
7
8// ErrClosed is returned by any closed values.
9//
10// A "closed value" is when the shadow has been notified that the real
11// side is complete and any blocking values will _never_ be satisfied
12// in the future. In this case, this error is returned. If a value is already
13// available, that is still returned.
14var ErrClosed = errors.New("shadow closed")
15
16// Value is a struct that coordinates a value between two
17// parallel routines. It is similar to atomic.Value except that if
18// Value is called before a value is set, it blocks until one is.
19//
20// The Value can be closed with Close, which will cause any future
21// blocking operations to return immediately with ErrClosed.
22type Value struct {
23 lock sync.Mutex
24 cond *sync.Cond
25 value interface{}
26 valueSet bool
27}
28
29// Close closes the value. This can never fail. For a definition of
30// "close" see the struct docs.
31func (w *Value) Close() error {
32 w.lock.Lock()
33 set := w.valueSet
34 w.lock.Unlock()
35
36 // If we haven't set the value, set it
37 if !set {
38 w.SetValue(ErrClosed)
39 }
40
41 // Done
42 return nil
43}
44
45// Value returns the value that was set.
46func (w *Value) Value() interface{} {
47 w.lock.Lock()
48 defer w.lock.Unlock()
49
50	// Wait until a value is set; if one already is, the loop never runs
51 for !w.valueSet {
52 // No value, setup the condition variable if we have to
53 if w.cond == nil {
54 w.cond = sync.NewCond(&w.lock)
55 }
56
57 // Wait on it
58 w.cond.Wait()
59 }
60
61 // Return the value
62 return w.value
63}
64
65// SetValue sets the value.
66func (w *Value) SetValue(v interface{}) {
67 w.lock.Lock()
68 defer w.lock.Unlock()
69
70 // Set the value
71 w.valueSet = true
72 w.value = v
73
74 // If we have a condition, clear it
75 if w.cond != nil {
76 w.cond.Broadcast()
77 w.cond = nil
78 }
79}
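Minimal sketch of the coordination pattern (illustrative): one goroutine produces, the other blocks on Value until it does.

	var v shadow.Value
	go v.SetValue("done")
	fmt.Println(v.Value()) // blocks, then prints "done"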
diff --git a/vendor/github.com/hashicorp/terraform/helper/structure/expand_json.go b/vendor/github.com/hashicorp/terraform/helper/structure/expand_json.go
new file mode 100644
index 0000000..b3eb90f
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/structure/expand_json.go
@@ -0,0 +1,11 @@
1package structure
2
3import "encoding/json"
4
5func ExpandJsonFromString(jsonString string) (map[string]interface{}, error) {
6 var result map[string]interface{}
7
8 err := json.Unmarshal([]byte(jsonString), &result)
9
10 return result, err
11}
diff --git a/vendor/github.com/hashicorp/terraform/helper/structure/flatten_json.go b/vendor/github.com/hashicorp/terraform/helper/structure/flatten_json.go
new file mode 100644
index 0000000..578ad2e
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/structure/flatten_json.go
@@ -0,0 +1,16 @@
1package structure
2
3import "encoding/json"
4
5func FlattenJsonToString(input map[string]interface{}) (string, error) {
6 if len(input) == 0 {
7 return "", nil
8 }
9
10 result, err := json.Marshal(input)
11 if err != nil {
12 return "", err
13 }
14
15 return string(result), nil
16}
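The two helpers round-trip through map[string]interface{}; an illustrative sketch:

	m, _ := structure.ExpandJsonFromString(`{"a": 1}`)
	s, _ := structure.FlattenJsonToString(m)
	fmt.Println(s) // {"a":1}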
diff --git a/vendor/github.com/hashicorp/terraform/helper/structure/normalize_json.go b/vendor/github.com/hashicorp/terraform/helper/structure/normalize_json.go
new file mode 100644
index 0000000..3256b47
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/structure/normalize_json.go
@@ -0,0 +1,24 @@
1package structure
2
3import "encoding/json"
4
5// NormalizeJsonString takes a value containing a JSON string and passes
6// it through the JSON parser to normalize it, returning either a parsing
7// error or the normalized JSON string.
8func NormalizeJsonString(jsonString interface{}) (string, error) {
9 var j interface{}
10
11 if jsonString == nil || jsonString.(string) == "" {
12 return "", nil
13 }
14
15 s := jsonString.(string)
16
17 err := json.Unmarshal([]byte(s), &j)
18 if err != nil {
19 return s, err
20 }
21
22	b, _ := json.Marshal(j)
23	return string(b), nil
24}
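Illustrative call: whitespace is stripped and json.Marshal re-orders object keys, so semantically equal documents normalize to the same string.

	norm, _ := structure.NormalizeJsonString(`{ "b" : 2, "a" : 1 }`)
	fmt.Println(norm) // {"a":1,"b":2}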
diff --git a/vendor/github.com/hashicorp/terraform/helper/structure/suppress_json_diff.go b/vendor/github.com/hashicorp/terraform/helper/structure/suppress_json_diff.go
new file mode 100644
index 0000000..46f794a
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/structure/suppress_json_diff.go
@@ -0,0 +1,21 @@
1package structure
2
3import (
4 "reflect"
5
6 "github.com/hashicorp/terraform/helper/schema"
7)
8
9func SuppressJsonDiff(k, old, new string, d *schema.ResourceData) bool {
10 oldMap, err := ExpandJsonFromString(old)
11 if err != nil {
12 return false
13 }
14
15 newMap, err := ExpandJsonFromString(new)
16 if err != nil {
17 return false
18 }
19
20 return reflect.DeepEqual(oldMap, newMap)
21}
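A hypothetical attachment point for the suppressor (attribute name invented): JSON differing only in whitespace or key order then produces no diff.

	"policy": &schema.Schema{
		Type:             schema.TypeString,
		Optional:         true,
		DiffSuppressFunc: structure.SuppressJsonDiff,
	},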
diff --git a/vendor/github.com/hashicorp/terraform/helper/validation/validation.go b/vendor/github.com/hashicorp/terraform/helper/validation/validation.go
new file mode 100644
index 0000000..7b894f5
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/validation/validation.go
@@ -0,0 +1,108 @@
1package validation
2
3import (
4 "fmt"
5 "net"
6 "strings"
7
8 "github.com/hashicorp/terraform/helper/schema"
9 "github.com/hashicorp/terraform/helper/structure"
10)
11
12// IntBetween returns a SchemaValidateFunc which tests if the provided value
13// is of type int and is between min and max (inclusive)
14func IntBetween(min, max int) schema.SchemaValidateFunc {
15 return func(i interface{}, k string) (s []string, es []error) {
16 v, ok := i.(int)
17 if !ok {
18 es = append(es, fmt.Errorf("expected type of %s to be int", k))
19 return
20 }
21
22 if v < min || v > max {
23 es = append(es, fmt.Errorf("expected %s to be in the range (%d - %d), got %d", k, min, max, v))
24 return
25 }
26
27 return
28 }
29}
30
31// StringInSlice returns a SchemaValidateFunc which tests if the provided value
32// is of type string and matches an element of the valid slice; the
33// comparison is case-insensitive when ignoreCase is true
34func StringInSlice(valid []string, ignoreCase bool) schema.SchemaValidateFunc {
35 return func(i interface{}, k string) (s []string, es []error) {
36 v, ok := i.(string)
37 if !ok {
38 es = append(es, fmt.Errorf("expected type of %s to be string", k))
39 return
40 }
41
42 for _, str := range valid {
43 if v == str || (ignoreCase && strings.ToLower(v) == strings.ToLower(str)) {
44 return
45 }
46 }
47
48 es = append(es, fmt.Errorf("expected %s to be one of %v, got %s", k, valid, v))
49 return
50 }
51}
52
53// StringLenBetween returns a SchemaValidateFunc which tests if the provided value
54// is of type string and has length between min and max (inclusive)
55func StringLenBetween(min, max int) schema.SchemaValidateFunc {
56 return func(i interface{}, k string) (s []string, es []error) {
57 v, ok := i.(string)
58 if !ok {
59 es = append(es, fmt.Errorf("expected type of %s to be string", k))
60 return
61 }
62 if len(v) < min || len(v) > max {
63 es = append(es, fmt.Errorf("expected length of %s to be in the range (%d - %d), got %s", k, min, max, v))
64 }
65 return
66 }
67}
68
69// CIDRNetwork returns a SchemaValidateFunc which tests if the provided value
70// is of type string, is in valid CIDR network notation, and has significant bits between min and max (inclusive)
71func CIDRNetwork(min, max int) schema.SchemaValidateFunc {
72 return func(i interface{}, k string) (s []string, es []error) {
73 v, ok := i.(string)
74 if !ok {
75 es = append(es, fmt.Errorf("expected type of %s to be string", k))
76 return
77 }
78
79 _, ipnet, err := net.ParseCIDR(v)
80 if err != nil {
81 es = append(es, fmt.Errorf(
82 "expected %s to contain a valid CIDR, got: %s with err: %s", k, v, err))
83 return
84 }
85
86 if ipnet == nil || v != ipnet.String() {
87 es = append(es, fmt.Errorf(
88 "expected %s to contain a valid network CIDR, expected %s, got %s",
89 k, ipnet, v))
90 }
91
92 sigbits, _ := ipnet.Mask.Size()
93 if sigbits < min || sigbits > max {
94 es = append(es, fmt.Errorf(
95 "expected %q to contain a network CIDR with between %d and %d significant bits, got: %d",
96 k, min, max, sigbits))
97 }
98
99 return
100 }
101}
102
103func ValidateJsonString(v interface{}, k string) (ws []string, errors []error) {
104 if _, err := structure.NormalizeJsonString(v); err != nil {
105 errors = append(errors, fmt.Errorf("%q contains an invalid JSON: %s", k, err))
106 }
107 return
108}
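Hypothetical schema attributes wiring up the validators above (names and ranges invented):

	"port": &schema.Schema{
		Type:         schema.TypeInt,
		Optional:     true,
		ValidateFunc: validation.IntBetween(1, 65535),
	},
	"tier": &schema.Schema{
		Type:         schema.TypeString,
		Optional:     true,
		ValidateFunc: validation.StringInSlice([]string{"basic", "standard"}, true),
	},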
diff --git a/vendor/github.com/hashicorp/terraform/plugin/plugin.go b/vendor/github.com/hashicorp/terraform/plugin/plugin.go
new file mode 100644
index 0000000..00fa7b2
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/plugin/plugin.go
@@ -0,0 +1,13 @@
1package plugin
2
3import (
4 "github.com/hashicorp/go-plugin"
5)
6
7// See serve.go for serving plugins
8
9// PluginMap should be used by clients for the map of plugins.
10var PluginMap = map[string]plugin.Plugin{
11 "provider": &ResourceProviderPlugin{},
12 "provisioner": &ResourceProvisionerPlugin{},
13}
diff --git a/vendor/github.com/hashicorp/terraform/plugin/resource_provider.go b/vendor/github.com/hashicorp/terraform/plugin/resource_provider.go
new file mode 100644
index 0000000..473f786
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/plugin/resource_provider.go
@@ -0,0 +1,578 @@
1package plugin
2
3import (
4 "net/rpc"
5
6 "github.com/hashicorp/go-plugin"
7 "github.com/hashicorp/terraform/terraform"
8)
9
10// ResourceProviderPlugin is the plugin.Plugin implementation.
11type ResourceProviderPlugin struct {
12 F func() terraform.ResourceProvider
13}
14
15func (p *ResourceProviderPlugin) Server(b *plugin.MuxBroker) (interface{}, error) {
16 return &ResourceProviderServer{Broker: b, Provider: p.F()}, nil
17}
18
19func (p *ResourceProviderPlugin) Client(
20 b *plugin.MuxBroker, c *rpc.Client) (interface{}, error) {
21 return &ResourceProvider{Broker: b, Client: c}, nil
22}
23
24// ResourceProvider is an implementation of terraform.ResourceProvider
25// that communicates over RPC.
26type ResourceProvider struct {
27 Broker *plugin.MuxBroker
28 Client *rpc.Client
29}
30
31func (p *ResourceProvider) Stop() error {
32 var resp ResourceProviderStopResponse
33 err := p.Client.Call("Plugin.Stop", new(interface{}), &resp)
34 if err != nil {
35 return err
36 }
37 if resp.Error != nil {
38 err = resp.Error
39 }
40
41 return err
42}
43
44func (p *ResourceProvider) Input(
45 input terraform.UIInput,
46 c *terraform.ResourceConfig) (*terraform.ResourceConfig, error) {
47 id := p.Broker.NextId()
48 go p.Broker.AcceptAndServe(id, &UIInputServer{
49 UIInput: input,
50 })
51
52 var resp ResourceProviderInputResponse
53 args := ResourceProviderInputArgs{
54 InputId: id,
55 Config: c,
56 }
57
58 err := p.Client.Call("Plugin.Input", &args, &resp)
59 if err != nil {
60 return nil, err
61 }
62 if resp.Error != nil {
63 err = resp.Error
64 return nil, err
65 }
66
67 return resp.Config, nil
68}
69
70func (p *ResourceProvider) Validate(c *terraform.ResourceConfig) ([]string, []error) {
71 var resp ResourceProviderValidateResponse
72 args := ResourceProviderValidateArgs{
73 Config: c,
74 }
75
76 err := p.Client.Call("Plugin.Validate", &args, &resp)
77 if err != nil {
78 return nil, []error{err}
79 }
80
81 var errs []error
82 if len(resp.Errors) > 0 {
83 errs = make([]error, len(resp.Errors))
84 for i, err := range resp.Errors {
85 errs[i] = err
86 }
87 }
88
89 return resp.Warnings, errs
90}
91
92func (p *ResourceProvider) ValidateResource(
93 t string, c *terraform.ResourceConfig) ([]string, []error) {
94 var resp ResourceProviderValidateResourceResponse
95 args := ResourceProviderValidateResourceArgs{
96 Config: c,
97 Type: t,
98 }
99
100 err := p.Client.Call("Plugin.ValidateResource", &args, &resp)
101 if err != nil {
102 return nil, []error{err}
103 }
104
105 var errs []error
106 if len(resp.Errors) > 0 {
107 errs = make([]error, len(resp.Errors))
108 for i, err := range resp.Errors {
109 errs[i] = err
110 }
111 }
112
113 return resp.Warnings, errs
114}
115
116func (p *ResourceProvider) Configure(c *terraform.ResourceConfig) error {
117 var resp ResourceProviderConfigureResponse
118 err := p.Client.Call("Plugin.Configure", c, &resp)
119 if err != nil {
120 return err
121 }
122 if resp.Error != nil {
123 err = resp.Error
124 }
125
126 return err
127}
128
129func (p *ResourceProvider) Apply(
130 info *terraform.InstanceInfo,
131 s *terraform.InstanceState,
132 d *terraform.InstanceDiff) (*terraform.InstanceState, error) {
133 var resp ResourceProviderApplyResponse
134 args := &ResourceProviderApplyArgs{
135 Info: info,
136 State: s,
137 Diff: d,
138 }
139
140 err := p.Client.Call("Plugin.Apply", args, &resp)
141 if err != nil {
142 return nil, err
143 }
144 if resp.Error != nil {
145 err = resp.Error
146 }
147
148 return resp.State, err
149}
150
151func (p *ResourceProvider) Diff(
152 info *terraform.InstanceInfo,
153 s *terraform.InstanceState,
154 c *terraform.ResourceConfig) (*terraform.InstanceDiff, error) {
155 var resp ResourceProviderDiffResponse
156 args := &ResourceProviderDiffArgs{
157 Info: info,
158 State: s,
159 Config: c,
160 }
161 err := p.Client.Call("Plugin.Diff", args, &resp)
162 if err != nil {
163 return nil, err
164 }
165 if resp.Error != nil {
166 err = resp.Error
167 }
168
169 return resp.Diff, err
170}
171
172func (p *ResourceProvider) ValidateDataSource(
173 t string, c *terraform.ResourceConfig) ([]string, []error) {
174 var resp ResourceProviderValidateResourceResponse
175 args := ResourceProviderValidateResourceArgs{
176 Config: c,
177 Type: t,
178 }
179
180 err := p.Client.Call("Plugin.ValidateDataSource", &args, &resp)
181 if err != nil {
182 return nil, []error{err}
183 }
184
185 var errs []error
186 if len(resp.Errors) > 0 {
187 errs = make([]error, len(resp.Errors))
188 for i, err := range resp.Errors {
189 errs[i] = err
190 }
191 }
192
193 return resp.Warnings, errs
194}
195
196func (p *ResourceProvider) Refresh(
197 info *terraform.InstanceInfo,
198 s *terraform.InstanceState) (*terraform.InstanceState, error) {
199 var resp ResourceProviderRefreshResponse
200 args := &ResourceProviderRefreshArgs{
201 Info: info,
202 State: s,
203 }
204
205 err := p.Client.Call("Plugin.Refresh", args, &resp)
206 if err != nil {
207 return nil, err
208 }
209 if resp.Error != nil {
210 err = resp.Error
211 }
212
213 return resp.State, err
214}
215
216func (p *ResourceProvider) ImportState(
217 info *terraform.InstanceInfo,
218 id string) ([]*terraform.InstanceState, error) {
219 var resp ResourceProviderImportStateResponse
220 args := &ResourceProviderImportStateArgs{
221 Info: info,
222 Id: id,
223 }
224
225 err := p.Client.Call("Plugin.ImportState", args, &resp)
226 if err != nil {
227 return nil, err
228 }
229 if resp.Error != nil {
230 err = resp.Error
231 }
232
233 return resp.State, err
234}
235
236func (p *ResourceProvider) Resources() []terraform.ResourceType {
237 var result []terraform.ResourceType
238
239 err := p.Client.Call("Plugin.Resources", new(interface{}), &result)
240 if err != nil {
241 // TODO: panic, log, what?
242 return nil
243 }
244
245 return result
246}
247
248func (p *ResourceProvider) ReadDataDiff(
249 info *terraform.InstanceInfo,
250 c *terraform.ResourceConfig) (*terraform.InstanceDiff, error) {
251 var resp ResourceProviderReadDataDiffResponse
252 args := &ResourceProviderReadDataDiffArgs{
253 Info: info,
254 Config: c,
255 }
256
257 err := p.Client.Call("Plugin.ReadDataDiff", args, &resp)
258 if err != nil {
259 return nil, err
260 }
261 if resp.Error != nil {
262 err = resp.Error
263 }
264
265 return resp.Diff, err
266}
267
268func (p *ResourceProvider) ReadDataApply(
269 info *terraform.InstanceInfo,
270 d *terraform.InstanceDiff) (*terraform.InstanceState, error) {
271 var resp ResourceProviderReadDataApplyResponse
272 args := &ResourceProviderReadDataApplyArgs{
273 Info: info,
274 Diff: d,
275 }
276
277 err := p.Client.Call("Plugin.ReadDataApply", args, &resp)
278 if err != nil {
279 return nil, err
280 }
281 if resp.Error != nil {
282 err = resp.Error
283 }
284
285 return resp.State, err
286}
287
288func (p *ResourceProvider) DataSources() []terraform.DataSource {
289 var result []terraform.DataSource
290
291 err := p.Client.Call("Plugin.DataSources", new(interface{}), &result)
292 if err != nil {
293 // TODO: panic, log, what?
294 return nil
295 }
296
297 return result
298}
299
300func (p *ResourceProvider) Close() error {
301 return p.Client.Close()
302}
303
304// ResourceProviderServer is a net/rpc compatible structure for serving
305// a ResourceProvider. This should not be used directly.
306type ResourceProviderServer struct {
307 Broker *plugin.MuxBroker
308 Provider terraform.ResourceProvider
309}
310
311type ResourceProviderStopResponse struct {
312 Error *plugin.BasicError
313}
314
315type ResourceProviderConfigureResponse struct {
316 Error *plugin.BasicError
317}
318
319type ResourceProviderInputArgs struct {
320 InputId uint32
321 Config *terraform.ResourceConfig
322}
323
324type ResourceProviderInputResponse struct {
325 Config *terraform.ResourceConfig
326 Error *plugin.BasicError
327}
328
329type ResourceProviderApplyArgs struct {
330 Info *terraform.InstanceInfo
331 State *terraform.InstanceState
332 Diff *terraform.InstanceDiff
333}
334
335type ResourceProviderApplyResponse struct {
336 State *terraform.InstanceState
337 Error *plugin.BasicError
338}
339
340type ResourceProviderDiffArgs struct {
341 Info *terraform.InstanceInfo
342 State *terraform.InstanceState
343 Config *terraform.ResourceConfig
344}
345
346type ResourceProviderDiffResponse struct {
347 Diff *terraform.InstanceDiff
348 Error *plugin.BasicError
349}
350
351type ResourceProviderRefreshArgs struct {
352 Info *terraform.InstanceInfo
353 State *terraform.InstanceState
354}
355
356type ResourceProviderRefreshResponse struct {
357 State *terraform.InstanceState
358 Error *plugin.BasicError
359}
360
361type ResourceProviderImportStateArgs struct {
362 Info *terraform.InstanceInfo
363 Id string
364}
365
366type ResourceProviderImportStateResponse struct {
367 State []*terraform.InstanceState
368 Error *plugin.BasicError
369}
370
371type ResourceProviderReadDataApplyArgs struct {
372 Info *terraform.InstanceInfo
373 Diff *terraform.InstanceDiff
374}
375
376type ResourceProviderReadDataApplyResponse struct {
377 State *terraform.InstanceState
378 Error *plugin.BasicError
379}
380
381type ResourceProviderReadDataDiffArgs struct {
382 Info *terraform.InstanceInfo
383 Config *terraform.ResourceConfig
384}
385
386type ResourceProviderReadDataDiffResponse struct {
387 Diff *terraform.InstanceDiff
388 Error *plugin.BasicError
389}
390
391type ResourceProviderValidateArgs struct {
392 Config *terraform.ResourceConfig
393}
394
395type ResourceProviderValidateResponse struct {
396 Warnings []string
397 Errors []*plugin.BasicError
398}
399
400type ResourceProviderValidateResourceArgs struct {
401 Config *terraform.ResourceConfig
402 Type string
403}
404
405type ResourceProviderValidateResourceResponse struct {
406 Warnings []string
407 Errors []*plugin.BasicError
408}
409
410func (s *ResourceProviderServer) Stop(
411 _ interface{},
412 reply *ResourceProviderStopResponse) error {
413 err := s.Provider.Stop()
414 *reply = ResourceProviderStopResponse{
415 Error: plugin.NewBasicError(err),
416 }
417
418 return nil
419}
420
421func (s *ResourceProviderServer) Input(
422 args *ResourceProviderInputArgs,
423 reply *ResourceProviderInputResponse) error {
424 conn, err := s.Broker.Dial(args.InputId)
425 if err != nil {
426 *reply = ResourceProviderInputResponse{
427 Error: plugin.NewBasicError(err),
428 }
429 return nil
430 }
431 client := rpc.NewClient(conn)
432 defer client.Close()
433
434 input := &UIInput{Client: client}
435
436 config, err := s.Provider.Input(input, args.Config)
437 *reply = ResourceProviderInputResponse{
438 Config: config,
439 Error: plugin.NewBasicError(err),
440 }
441
442 return nil
443}
444
445func (s *ResourceProviderServer) Validate(
446 args *ResourceProviderValidateArgs,
447 reply *ResourceProviderValidateResponse) error {
448 warns, errs := s.Provider.Validate(args.Config)
449 berrs := make([]*plugin.BasicError, len(errs))
450 for i, err := range errs {
451 berrs[i] = plugin.NewBasicError(err)
452 }
453 *reply = ResourceProviderValidateResponse{
454 Warnings: warns,
455 Errors: berrs,
456 }
457 return nil
458}
459
460func (s *ResourceProviderServer) ValidateResource(
461 args *ResourceProviderValidateResourceArgs,
462 reply *ResourceProviderValidateResourceResponse) error {
463 warns, errs := s.Provider.ValidateResource(args.Type, args.Config)
464 berrs := make([]*plugin.BasicError, len(errs))
465 for i, err := range errs {
466 berrs[i] = plugin.NewBasicError(err)
467 }
468 *reply = ResourceProviderValidateResourceResponse{
469 Warnings: warns,
470 Errors: berrs,
471 }
472 return nil
473}
474
475func (s *ResourceProviderServer) Configure(
476 config *terraform.ResourceConfig,
477 reply *ResourceProviderConfigureResponse) error {
478 err := s.Provider.Configure(config)
479 *reply = ResourceProviderConfigureResponse{
480 Error: plugin.NewBasicError(err),
481 }
482 return nil
483}
484
485func (s *ResourceProviderServer) Apply(
486 args *ResourceProviderApplyArgs,
487 result *ResourceProviderApplyResponse) error {
488 state, err := s.Provider.Apply(args.Info, args.State, args.Diff)
489 *result = ResourceProviderApplyResponse{
490 State: state,
491 Error: plugin.NewBasicError(err),
492 }
493 return nil
494}
495
496func (s *ResourceProviderServer) Diff(
497 args *ResourceProviderDiffArgs,
498 result *ResourceProviderDiffResponse) error {
499 diff, err := s.Provider.Diff(args.Info, args.State, args.Config)
500 *result = ResourceProviderDiffResponse{
501 Diff: diff,
502 Error: plugin.NewBasicError(err),
503 }
504 return nil
505}
506
507func (s *ResourceProviderServer) Refresh(
508 args *ResourceProviderRefreshArgs,
509 result *ResourceProviderRefreshResponse) error {
510 newState, err := s.Provider.Refresh(args.Info, args.State)
511 *result = ResourceProviderRefreshResponse{
512 State: newState,
513 Error: plugin.NewBasicError(err),
514 }
515 return nil
516}
517
518func (s *ResourceProviderServer) ImportState(
519 args *ResourceProviderImportStateArgs,
520 result *ResourceProviderImportStateResponse) error {
521 states, err := s.Provider.ImportState(args.Info, args.Id)
522 *result = ResourceProviderImportStateResponse{
523 State: states,
524 Error: plugin.NewBasicError(err),
525 }
526 return nil
527}
528
529func (s *ResourceProviderServer) Resources(
530 nothing interface{},
531 result *[]terraform.ResourceType) error {
532 *result = s.Provider.Resources()
533 return nil
534}
535
536func (s *ResourceProviderServer) ValidateDataSource(
537 args *ResourceProviderValidateResourceArgs,
538 reply *ResourceProviderValidateResourceResponse) error {
539 warns, errs := s.Provider.ValidateDataSource(args.Type, args.Config)
540 berrs := make([]*plugin.BasicError, len(errs))
541 for i, err := range errs {
542 berrs[i] = plugin.NewBasicError(err)
543 }
544 *reply = ResourceProviderValidateResourceResponse{
545 Warnings: warns,
546 Errors: berrs,
547 }
548 return nil
549}
550
551func (s *ResourceProviderServer) ReadDataDiff(
552 args *ResourceProviderReadDataDiffArgs,
553 result *ResourceProviderReadDataDiffResponse) error {
554 diff, err := s.Provider.ReadDataDiff(args.Info, args.Config)
555 *result = ResourceProviderReadDataDiffResponse{
556 Diff: diff,
557 Error: plugin.NewBasicError(err),
558 }
559 return nil
560}
561
562func (s *ResourceProviderServer) ReadDataApply(
563 args *ResourceProviderReadDataApplyArgs,
564 result *ResourceProviderReadDataApplyResponse) error {
565 newState, err := s.Provider.ReadDataApply(args.Info, args.Diff)
566 *result = ResourceProviderReadDataApplyResponse{
567 State: newState,
568 Error: plugin.NewBasicError(err),
569 }
570 return nil
571}
572
573func (s *ResourceProviderServer) DataSources(
574 nothing interface{},
575 result *[]terraform.DataSource) error {
576 *result = s.Provider.DataSources()
577 return nil
578}
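ResourceProviderServer is described above as a net/rpc compatible structure. A sketch of what that means, serving a provider over a bare listener outside of go-plugin; this is illustrative only, and Input is the one method that would not work here because it needs a MuxBroker to dial back to the client:

package example

import (
	"net"
	"net/rpc"

	tfplugin "github.com/hashicorp/terraform/plugin"
	"github.com/hashicorp/terraform/terraform"
)

// serveProvider exposes a provider using plain net/rpc, mirroring what
// go-plugin does over its managed connection. Registering under the name
// "Plugin" is what makes the "Plugin.*" method names above resolve.
func serveProvider(l net.Listener, p terraform.ResourceProvider) error {
	srv := rpc.NewServer()
	if err := srv.RegisterName("Plugin", &tfplugin.ResourceProviderServer{Provider: p}); err != nil {
		return err
	}
	for {
		conn, err := l.Accept()
		if err != nil {
			return err
		}
		go srv.ServeConn(conn)
	}
}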
diff --git a/vendor/github.com/hashicorp/terraform/plugin/resource_provisioner.go b/vendor/github.com/hashicorp/terraform/plugin/resource_provisioner.go
new file mode 100644
index 0000000..8fce9d8
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/plugin/resource_provisioner.go
@@ -0,0 +1,173 @@
1package plugin
2
3import (
4 "net/rpc"
5
6 "github.com/hashicorp/go-plugin"
7 "github.com/hashicorp/terraform/terraform"
8)
9
10// ResourceProvisionerPlugin is the plugin.Plugin implementation.
11type ResourceProvisionerPlugin struct {
12 F func() terraform.ResourceProvisioner
13}
14
15func (p *ResourceProvisionerPlugin) Server(b *plugin.MuxBroker) (interface{}, error) {
16 return &ResourceProvisionerServer{Broker: b, Provisioner: p.F()}, nil
17}
18
19func (p *ResourceProvisionerPlugin) Client(
20 b *plugin.MuxBroker, c *rpc.Client) (interface{}, error) {
21 return &ResourceProvisioner{Broker: b, Client: c}, nil
22}
23
24// ResourceProvisioner is an implementation of terraform.ResourceProvisioner
25// that communicates over RPC.
26type ResourceProvisioner struct {
27 Broker *plugin.MuxBroker
28 Client *rpc.Client
29}
30
31func (p *ResourceProvisioner) Validate(c *terraform.ResourceConfig) ([]string, []error) {
32 var resp ResourceProvisionerValidateResponse
33 args := ResourceProvisionerValidateArgs{
34 Config: c,
35 }
36
37 err := p.Client.Call("Plugin.Validate", &args, &resp)
38 if err != nil {
39 return nil, []error{err}
40 }
41
42 var errs []error
43 if len(resp.Errors) > 0 {
44 errs = make([]error, len(resp.Errors))
45 for i, err := range resp.Errors {
46 errs[i] = err
47 }
48 }
49
50 return resp.Warnings, errs
51}
52
53func (p *ResourceProvisioner) Apply(
54 output terraform.UIOutput,
55 s *terraform.InstanceState,
56 c *terraform.ResourceConfig) error {
57 id := p.Broker.NextId()
58 go p.Broker.AcceptAndServe(id, &UIOutputServer{
59 UIOutput: output,
60 })
61
62 var resp ResourceProvisionerApplyResponse
63 args := &ResourceProvisionerApplyArgs{
64 OutputId: id,
65 State: s,
66 Config: c,
67 }
68
69 err := p.Client.Call("Plugin.Apply", args, &resp)
70 if err != nil {
71 return err
72 }
73 if resp.Error != nil {
74 err = resp.Error
75 }
76
77 return err
78}
79
80func (p *ResourceProvisioner) Stop() error {
81 var resp ResourceProvisionerStopResponse
82 err := p.Client.Call("Plugin.Stop", new(interface{}), &resp)
83 if err != nil {
84 return err
85 }
86 if resp.Error != nil {
87 err = resp.Error
88 }
89
90 return err
91}
92
93func (p *ResourceProvisioner) Close() error {
94 return p.Client.Close()
95}
96
97type ResourceProvisionerValidateArgs struct {
98 Config *terraform.ResourceConfig
99}
100
101type ResourceProvisionerValidateResponse struct {
102 Warnings []string
103 Errors []*plugin.BasicError
104}
105
106type ResourceProvisionerApplyArgs struct {
107 OutputId uint32
108 State *terraform.InstanceState
109 Config *terraform.ResourceConfig
110}
111
112type ResourceProvisionerApplyResponse struct {
113 Error *plugin.BasicError
114}
115
116type ResourceProvisionerStopResponse struct {
117 Error *plugin.BasicError
118}
119
120// ResourceProvisionerServer is a net/rpc compatible structure for serving
121// a ResourceProvisioner. This should not be used directly.
122type ResourceProvisionerServer struct {
123 Broker *plugin.MuxBroker
124 Provisioner terraform.ResourceProvisioner
125}
126
127func (s *ResourceProvisionerServer) Apply(
128 args *ResourceProvisionerApplyArgs,
129 result *ResourceProvisionerApplyResponse) error {
130 conn, err := s.Broker.Dial(args.OutputId)
131 if err != nil {
132 *result = ResourceProvisionerApplyResponse{
133 Error: plugin.NewBasicError(err),
134 }
135 return nil
136 }
137 client := rpc.NewClient(conn)
138 defer client.Close()
139
140 output := &UIOutput{Client: client}
141
142 err = s.Provisioner.Apply(output, args.State, args.Config)
143 *result = ResourceProvisionerApplyResponse{
144 Error: plugin.NewBasicError(err),
145 }
146 return nil
147}
148
149func (s *ResourceProvisionerServer) Validate(
150 args *ResourceProvisionerValidateArgs,
151 reply *ResourceProvisionerValidateResponse) error {
152 warns, errs := s.Provisioner.Validate(args.Config)
153 berrs := make([]*plugin.BasicError, len(errs))
154 for i, err := range errs {
155 berrs[i] = plugin.NewBasicError(err)
156 }
157 *reply = ResourceProvisionerValidateResponse{
158 Warnings: warns,
159 Errors: berrs,
160 }
161 return nil
162}
163
164func (s *ResourceProvisionerServer) Stop(
165 _ interface{},
166 reply *ResourceProvisionerStopResponse) error {
167 err := s.Provisioner.Stop()
168 *reply = ResourceProvisionerStopResponse{
169 Error: plugin.NewBasicError(err),
170 }
171
172 return nil
173}
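The provisioner side mirrors the provider plumbing. A minimal terraform.ResourceProvisioner that satisfies the interface these RPC types carry, usable as the F factory in ResourceProvisionerPlugin; the no-op behavior is purely illustrative:

package example

import (
	"github.com/hashicorp/terraform/terraform"
)

// noopProvisioner accepts any configuration and does nothing beyond
// reporting through the RPC-backed UIOutput.
type noopProvisioner struct{}

func (p *noopProvisioner) Validate(c *terraform.ResourceConfig) ([]string, []error) {
	return nil, nil
}

func (p *noopProvisioner) Apply(o terraform.UIOutput, s *terraform.InstanceState, c *terraform.ResourceConfig) error {
	o.Output("noop: nothing to provision")
	return nil
}

func (p *noopProvisioner) Stop() error { return nil }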
diff --git a/vendor/github.com/hashicorp/terraform/plugin/serve.go b/vendor/github.com/hashicorp/terraform/plugin/serve.go
new file mode 100644
index 0000000..2028a61
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/plugin/serve.go
@@ -0,0 +1,54 @@
1package plugin
2
3import (
4 "github.com/hashicorp/go-plugin"
5 "github.com/hashicorp/terraform/terraform"
6)
7
8// The constants below are the names of the plugins that can be dispensed
9// from the plugin server.
10const (
11 ProviderPluginName = "provider"
12 ProvisionerPluginName = "provisioner"
13)
14
15// Handshake is the HandshakeConfig used to configure clients and servers.
16var Handshake = plugin.HandshakeConfig{
17 // The ProtocolVersion is the version that must match between TF core
18 // and TF plugins. This should be bumped whenever a change happens in
19 // one or the other that makes it so that they can't safely communicate.
20 // This could be adding a new interface value, it could be how
21 // helper/schema computes diffs, etc.
22 ProtocolVersion: 4,
23
24 // The magic cookie values should NEVER be changed.
25 MagicCookieKey: "TF_PLUGIN_MAGIC_COOKIE",
26 MagicCookieValue: "d602bf8f470bc67ca7faa0386276bbdd4330efaf76d1a219cb4d6991ca9872b2",
27}
28
29type ProviderFunc func() terraform.ResourceProvider
30type ProvisionerFunc func() terraform.ResourceProvisioner
31
32// ServeOpts are the configurations to serve a plugin.
33type ServeOpts struct {
34 ProviderFunc ProviderFunc
35 ProvisionerFunc ProvisionerFunc
36}
37
38// Serve serves a plugin. This function never returns and should be the final
39// function called in the main function of the plugin.
40func Serve(opts *ServeOpts) {
41 plugin.Serve(&plugin.ServeConfig{
42 HandshakeConfig: Handshake,
43 Plugins: pluginMap(opts),
44 })
45}
46
47// pluginMap returns the map[string]plugin.Plugin to use for configuring a plugin
48// server or client.
49func pluginMap(opts *ServeOpts) map[string]plugin.Plugin {
50 return map[string]plugin.Plugin{
51 "provider": &ResourceProviderPlugin{F: opts.ProviderFunc},
52 "provisioner": &ResourceProvisionerPlugin{F: opts.ProvisionerFunc},
53 }
54}
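A provider binary's main function, then, is typically a single Serve call. A minimal sketch; the provider constructor is a placeholder, since a real plugin would return its concrete implementation (commonly a helper/schema provider):

package main

import (
	"github.com/hashicorp/terraform/plugin"
	"github.com/hashicorp/terraform/terraform"
)

// provider is a placeholder; a real plugin returns its concrete
// ResourceProvider here.
func provider() terraform.ResourceProvider {
	return nil
}

func main() {
	// Serve never returns, so it is the last call in main.
	plugin.Serve(&plugin.ServeOpts{
		ProviderFunc: provider,
	})
}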
diff --git a/vendor/github.com/hashicorp/terraform/plugin/ui_input.go b/vendor/github.com/hashicorp/terraform/plugin/ui_input.go
new file mode 100644
index 0000000..493efc0
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/plugin/ui_input.go
@@ -0,0 +1,51 @@
1package plugin
2
3import (
4 "net/rpc"
5
6 "github.com/hashicorp/go-plugin"
7 "github.com/hashicorp/terraform/terraform"
8)
9
10// UIInput is an implementation of terraform.UIInput that communicates
11// over RPC.
12type UIInput struct {
13 Client *rpc.Client
14}
15
16func (i *UIInput) Input(opts *terraform.InputOpts) (string, error) {
17 var resp UIInputInputResponse
18 err := i.Client.Call("Plugin.Input", opts, &resp)
19 if err != nil {
20 return "", err
21 }
22 if resp.Error != nil {
23 err = resp.Error
24 return "", err
25 }
26
27 return resp.Value, nil
28}
29
30type UIInputInputResponse struct {
31 Value string
32 Error *plugin.BasicError
33}
34
35// UIInputServer is a net/rpc compatible structure for serving
36// a UIInputServer. This should not be used directly.
37type UIInputServer struct {
38 UIInput terraform.UIInput
39}
40
41func (s *UIInputServer) Input(
42 opts *terraform.InputOpts,
43 reply *UIInputInputResponse) error {
44 value, err := s.UIInput.Input(opts)
45 *reply = UIInputInputResponse{
46 Value: value,
47 Error: plugin.NewBasicError(err),
48 }
49
50 return nil
51}
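Anything satisfying terraform.UIInput can sit behind UIInputServer. A minimal sketch of such an implementation, with a canned answer, as one might use in tests:

package example

import "github.com/hashicorp/terraform/terraform"

// staticInput answers every prompt with one canned value; enough to stand
// behind a UIInputServer in tests.
type staticInput struct {
	value string
}

func (s *staticInput) Input(opts *terraform.InputOpts) (string, error) {
	return s.value, nil
}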
diff --git a/vendor/github.com/hashicorp/terraform/plugin/ui_output.go b/vendor/github.com/hashicorp/terraform/plugin/ui_output.go
new file mode 100644
index 0000000..c222b00
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/plugin/ui_output.go
@@ -0,0 +1,29 @@
1package plugin
2
3import (
4 "net/rpc"
5
6 "github.com/hashicorp/terraform/terraform"
7)
8
9// UIOutput is an implementation of terraform.UIOutput that communicates
10// over RPC.
11type UIOutput struct {
12 Client *rpc.Client
13}
14
15func (o *UIOutput) Output(v string) {
16 o.Client.Call("Plugin.Output", v, new(interface{}))
17}
18
19// UIOutputServer is the RPC server for serving UIOutput.
20type UIOutputServer struct {
21 UIOutput terraform.UIOutput
22}
23
24func (s *UIOutputServer) Output(
25 v string,
26 reply *interface{}) error {
27 s.UIOutput.Output(v)
28 return nil
29}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/context.go b/vendor/github.com/hashicorp/terraform/terraform/context.go
new file mode 100644
index 0000000..306128e
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/context.go
@@ -0,0 +1,1022 @@
1package terraform
2
3import (
4 "context"
5 "fmt"
6 "log"
7 "sort"
8 "strings"
9 "sync"
10
11 "github.com/hashicorp/go-multierror"
12 "github.com/hashicorp/hcl"
13 "github.com/hashicorp/terraform/config"
14 "github.com/hashicorp/terraform/config/module"
15 "github.com/hashicorp/terraform/helper/experiment"
16)
17
18// InputMode defines what sort of input will be asked for when Input
19// is called on Context.
20type InputMode byte
21
22const (
23 // InputModeVar asks for all variables
24 InputModeVar InputMode = 1 << iota
25
26 // InputModeVarUnset asks for variables which are not set yet.
27 // InputModeVar must be set for this to have an effect.
28 InputModeVarUnset
29
30 // InputModeProvider asks for provider variables
31 InputModeProvider
32
33 // InputModeStd is the standard operating mode and asks for both variables
34 // and providers.
35 InputModeStd = InputModeVar | InputModeProvider
36)
37
38var (
39 // contextFailOnShadowError will cause Context operations to return
40 // errors when shadow operations fail. This is only used for testing.
41 contextFailOnShadowError = false
42
43 // contextTestDeepCopyOnPlan will perform a Diff DeepCopy on every
44 // Plan operation, effectively testing the Diff DeepCopy whenever
45 // a Plan occurs. This is enabled for tests.
46 contextTestDeepCopyOnPlan = false
47)
48
49// ContextOpts are the user-configurable options to create a context with
50// NewContext.
51type ContextOpts struct {
52 Meta *ContextMeta
53 Destroy bool
54 Diff *Diff
55 Hooks []Hook
56 Module *module.Tree
57 Parallelism int
58 State *State
59 StateFutureAllowed bool
60 Providers map[string]ResourceProviderFactory
61 Provisioners map[string]ResourceProvisionerFactory
62 Shadow bool
63 Targets []string
64 Variables map[string]interface{}
65
66 UIInput UIInput
67}
68
69// ContextMeta is metadata about the running context. This is information
70// that this package or structure cannot determine on its own but exposes
71// into Terraform in various ways. This must be provided by the Context
72// initializer.
73type ContextMeta struct {
74 Env string // Env is the state environment
75}
76
77// Context represents all the context that Terraform needs in order to
78// perform operations on infrastructure. This structure is built using
79// NewContext. See the documentation for that.
80//
81// Extra functions on Context can be found in context_*.go files.
82type Context struct {
83 // Maintainer note: Anytime this struct is changed, please verify
84 // that newShadowContext still does the right thing. Tests should
85 // fail regardless but putting this note here as well.
86
87 components contextComponentFactory
88 destroy bool
89 diff *Diff
90 diffLock sync.RWMutex
91 hooks []Hook
92 meta *ContextMeta
93 module *module.Tree
94 sh *stopHook
95 shadow bool
96 state *State
97 stateLock sync.RWMutex
98 targets []string
99 uiInput UIInput
100 variables map[string]interface{}
101
102 l sync.Mutex // Lock acquired during any task
103 parallelSem Semaphore
104 providerInputConfig map[string]map[string]interface{}
105 runLock sync.Mutex
106 runCond *sync.Cond
107 runContext context.Context
108 runContextCancel context.CancelFunc
109 shadowErr error
110}
111
112// NewContext creates a new Context structure.
113//
114// Once a Context is created, the pointer values within ContextOpts
115// should not be mutated in any way, since the pointers are copied, not
116// the values themselves.
117func NewContext(opts *ContextOpts) (*Context, error) {
118 // Validate the version requirement if it is given
119 if opts.Module != nil {
120 if err := checkRequiredVersion(opts.Module); err != nil {
121 return nil, err
122 }
123 }
124
125 // Copy all the hooks and add our stop hook. We don't append directly
126 // to the Config so that we're not modifying that in-place.
127 sh := new(stopHook)
128 hooks := make([]Hook, len(opts.Hooks)+1)
129 copy(hooks, opts.Hooks)
130 hooks[len(opts.Hooks)] = sh
131
132 state := opts.State
133 if state == nil {
134 state = new(State)
135 state.init()
136 }
137
138 // If our state is from the future, then error. Callers can avoid
139 // this error by explicitly setting `StateFutureAllowed`.
140 if !opts.StateFutureAllowed && state.FromFutureTerraform() {
141 return nil, fmt.Errorf(
142 "Terraform doesn't allow running any operations against a state\n"+
143 "that was written by a future Terraform version. The state is\n"+
144 "reporting it is written by Terraform '%s'.\n\n"+
145 "Please run at least that version of Terraform to continue.",
146 state.TFVersion)
147 }
148
149 // Explicitly reset our state version to our current version so that
150 // any operations we do will write out that our latest version
151 // has run.
152 state.TFVersion = Version
153
154 // Determine parallelism, defaulting to 10. We do this both to limit
155 // CPU pressure and to have an extra guard against rate throttling
156 // from providers.
157 par := opts.Parallelism
158 if par == 0 {
159 par = 10
160 }
161
162 // Set up the variables in the following sequence:
163 // 0 - Take default values from the configuration
164 // 1 - Take values from TF_VAR_x environment variables
165 // 2 - Take values specified in -var flags, overriding values
166 // set by environment variables if necessary. This includes
167 // values taken from -var-file in addition.
168 variables := make(map[string]interface{})
169
170 if opts.Module != nil {
171 var err error
172 variables, err = Variables(opts.Module, opts.Variables)
173 if err != nil {
174 return nil, err
175 }
176 }
177
178 diff := opts.Diff
179 if diff == nil {
180 diff = &Diff{}
181 }
182
183 return &Context{
184 components: &basicComponentFactory{
185 providers: opts.Providers,
186 provisioners: opts.Provisioners,
187 },
188 destroy: opts.Destroy,
189 diff: diff,
190 hooks: hooks,
191 meta: opts.Meta,
192 module: opts.Module,
193 shadow: opts.Shadow,
194 state: state,
195 targets: opts.Targets,
196 uiInput: opts.UIInput,
197 variables: variables,
198
199 parallelSem: NewSemaphore(par),
200 providerInputConfig: make(map[string]map[string]interface{}),
201 sh: sh,
202 }, nil
203}
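Tying the options together, a caller builds ContextOpts and then drives an operation. A minimal sketch, assuming a loaded *module.Tree and a provider factory map are already available; the variable name and value are illustrative:

package example

import (
	"github.com/hashicorp/terraform/config/module"
	"github.com/hashicorp/terraform/terraform"
)

// planModule builds a Context and produces a plan. A Parallelism of 0
// would also default to 10, per NewContext above.
func planModule(mod *module.Tree, providers map[string]terraform.ResourceProviderFactory) (*terraform.Plan, error) {
	ctx, err := terraform.NewContext(&terraform.ContextOpts{
		Module:      mod,
		Providers:   providers,
		Variables:   map[string]interface{}{"region": "us-east-1"}, // illustrative
		Parallelism: 10,
	})
	if err != nil {
		return nil, err
	}
	return ctx.Plan()
}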
204
205type ContextGraphOpts struct {
206 // If true, validates the graph structure (checks for cycles).
207 Validate bool
208
209 // Legacy graphs only: won't prune the graph
210 Verbose bool
211}
212
213// Graph returns the graph used for the given operation type.
214//
215// The most extensive or complex graph type is GraphTypePlan.
216func (c *Context) Graph(typ GraphType, opts *ContextGraphOpts) (*Graph, error) {
217 if opts == nil {
218 opts = &ContextGraphOpts{Validate: true}
219 }
220
221 log.Printf("[INFO] terraform: building graph: %s", typ)
222 switch typ {
223 case GraphTypeApply:
224 return (&ApplyGraphBuilder{
225 Module: c.module,
226 Diff: c.diff,
227 State: c.state,
228 Providers: c.components.ResourceProviders(),
229 Provisioners: c.components.ResourceProvisioners(),
230 Targets: c.targets,
231 Destroy: c.destroy,
232 Validate: opts.Validate,
233 }).Build(RootModulePath)
234
235 case GraphTypeInput:
236 // The input graph is just a slightly modified plan graph
237 fallthrough
238 case GraphTypeValidate:
239 // The validate graph is just a slightly modified plan graph
240 fallthrough
241 case GraphTypePlan:
242 // Create the plan graph builder
243 p := &PlanGraphBuilder{
244 Module: c.module,
245 State: c.state,
246 Providers: c.components.ResourceProviders(),
247 Targets: c.targets,
248 Validate: opts.Validate,
249 }
250
251 // Some special cases for other graph types shared with plan currently
252 var b GraphBuilder = p
253 switch typ {
254 case GraphTypeInput:
255 b = InputGraphBuilder(p)
256 case GraphTypeValidate:
257 // We need to set the provisioners so those can be validated
258 p.Provisioners = c.components.ResourceProvisioners()
259
260 b = ValidateGraphBuilder(p)
261 }
262
263 return b.Build(RootModulePath)
264
265 case GraphTypePlanDestroy:
266 return (&DestroyPlanGraphBuilder{
267 Module: c.module,
268 State: c.state,
269 Targets: c.targets,
270 Validate: opts.Validate,
271 }).Build(RootModulePath)
272
273 case GraphTypeRefresh:
274 return (&RefreshGraphBuilder{
275 Module: c.module,
276 State: c.state,
277 Providers: c.components.ResourceProviders(),
278 Targets: c.targets,
279 Validate: opts.Validate,
280 }).Build(RootModulePath)
281 }
282
283 return nil, fmt.Errorf("unknown graph type: %s", typ)
284}
285
286// ShadowError returns any errors caught during a shadow operation.
287//
288// A shadow operation is an operation run in parallel to a real operation
289// that performs the same tasks using new logic on copied state. The results
290// are compared to ensure that the new logic works the same as the old logic.
291// The shadow never affects the real operation or return values.
292//
293// The results of the shadow operation are only available through this function
294// call after a real operation is complete.
295//
296// For API consumers of Context, you can safely ignore this function
297// completely if you have no interest in helping report experimental feature
298// errors to Terraform maintainers. Otherwise, please call this function
299// after every operation and report this to the user.
300//
301// IMPORTANT: Shadow errors are _never_ critical: they _never_ affect
302// the real state or result of a real operation. They are purely informational
303// to assist in future Terraform versions being more stable. Please message
304// this effectively to the end user.
305//
306// This must be called only when no other operation is running (refresh,
307// plan, etc.). The result can be used in parallel to any other operation
308// running.
309func (c *Context) ShadowError() error {
310 return c.shadowErr
311}
312
313// State returns a copy of the current state associated with this context.
314//
315// This cannot safely be called in parallel with any other Context function.
316func (c *Context) State() *State {
317 return c.state.DeepCopy()
318}
319
320// Interpolater returns an Interpolater built on a copy of the state
321// that can be used to test interpolation values.
322func (c *Context) Interpolater() *Interpolater {
323 var varLock sync.Mutex
324 var stateLock sync.RWMutex
325 return &Interpolater{
326 Operation: walkApply,
327 Meta: c.meta,
328 Module: c.module,
329 State: c.state.DeepCopy(),
330 StateLock: &stateLock,
331 VariableValues: c.variables,
332 VariableValuesLock: &varLock,
333 }
334}
335
336// Input asks for input to fill variables and provider configurations.
337// This modifies the configuration in-place, so asking for Input twice
338// may result in different UI output showing different current values.
339func (c *Context) Input(mode InputMode) error {
340 defer c.acquireRun("input")()
341
342 if mode&InputModeVar != 0 {
343 // Walk the variables first for the root module. We walk them in
344 // alphabetical order for UX reasons.
345 rootConf := c.module.Config()
346 names := make([]string, len(rootConf.Variables))
347 m := make(map[string]*config.Variable)
348 for i, v := range rootConf.Variables {
349 names[i] = v.Name
350 m[v.Name] = v
351 }
352 sort.Strings(names)
353 for _, n := range names {
354 // If we only care about unset variables, then if the variable
355 // is set, continue on.
356 if mode&InputModeVarUnset != 0 {
357 if _, ok := c.variables[n]; ok {
358 continue
359 }
360 }
361
362 var valueType config.VariableType
363
364 v := m[n]
365 switch valueType = v.Type(); valueType {
366 case config.VariableTypeUnknown:
367 continue
368 case config.VariableTypeMap:
369 // OK
370 case config.VariableTypeList:
371 // OK
372 case config.VariableTypeString:
373 // OK
374 default:
375 panic(fmt.Sprintf("Unknown variable type: %#v", v.Type()))
376 }
377
378 // If the variable is not already set, and the variable defines a
379 // default, use that for the value.
380 if _, ok := c.variables[n]; !ok {
381 if v.Default != nil {
382 c.variables[n] = v.Default.(string)
383 continue
384 }
385 }
386
387 // this should only happen during tests
388 if c.uiInput == nil {
389 log.Println("[WARN] Context.uiInput is nil")
390 continue
391 }
392
393 // Ask the user for a value for this variable
394 var value string
395 retry := 0
396 for {
397 var err error
398 value, err = c.uiInput.Input(&InputOpts{
399 Id: fmt.Sprintf("var.%s", n),
400 Query: fmt.Sprintf("var.%s", n),
401 Description: v.Description,
402 })
403 if err != nil {
404 return fmt.Errorf(
405 "Error asking for %s: %s", n, err)
406 }
407
408 if value == "" && v.Required() {
409 // Redo if it is required, but abort if we keep getting
410 // blank entries
411 if retry > 2 {
412 return fmt.Errorf("missing required value for %q", n)
413 }
414 retry++
415 continue
416 }
417
418 break
419 }
420
421 // no value provided, so don't set the variable at all
422 if value == "" {
423 continue
424 }
425
426 decoded, err := parseVariableAsHCL(n, value, valueType)
427 if err != nil {
428 return err
429 }
430
431 if decoded != nil {
432 c.variables[n] = decoded
433 }
434 }
435 }
436
437 if mode&InputModeProvider != 0 {
438 // Build the graph
439 graph, err := c.Graph(GraphTypeInput, nil)
440 if err != nil {
441 return err
442 }
443
444 // Do the walk
445 if _, err := c.walk(graph, nil, walkInput); err != nil {
446 return err
447 }
448 }
449
450 return nil
451}
452
453// Apply applies the changes represented by this context and returns
454// the resulting state.
455//
456// Even in the case an error is returned, the state may be returned and will
457// potentially be partially updated. In addition to returning the resulting
458// state, this context is updated with the latest state.
459//
460// If the state is required after an error, the caller should call
461// Context.State, rather than rely on the return value.
462//
463// TODO: Apply and Refresh should either always return a state, or rely on the
464// State() method. Currently the helper/resource testing framework relies
465// on the absence of a returned state to determine if Destroy can be
466// called, so that will need to be refactored before this can be changed.
467func (c *Context) Apply() (*State, error) {
468 defer c.acquireRun("apply")()
469
470 // Copy our own state
471 c.state = c.state.DeepCopy()
472
473 // Build the graph.
474 graph, err := c.Graph(GraphTypeApply, nil)
475 if err != nil {
476 return nil, err
477 }
478
479 // Determine the operation
480 operation := walkApply
481 if c.destroy {
482 operation = walkDestroy
483 }
484
485 // Walk the graph
486 walker, err := c.walk(graph, graph, operation)
487 if len(walker.ValidationErrors) > 0 {
488 err = multierror.Append(err, walker.ValidationErrors...)
489 }
490
491 // Clean out any unused things
492 c.state.prune()
493
494 return c.state, err
495}
496
497// Plan generates an execution plan for the given context.
498//
499// The execution plan encapsulates the context and can be stored
500// in order to reinstantiate a context later for Apply.
501//
502// Plan also updates the diff of this context to be the diff generated
503// by the plan, so Apply can be called after.
504func (c *Context) Plan() (*Plan, error) {
505 defer c.acquireRun("plan")()
506
507 p := &Plan{
508 Module: c.module,
509 Vars: c.variables,
510 State: c.state,
511 Targets: c.targets,
512 }
513
514 var operation walkOperation
515 if c.destroy {
516 operation = walkPlanDestroy
517 } else {
518 // Set our state to be something temporary. We do this so that
519 // the plan can update a fake state so that variables work, then
520 // we replace it back with our old state.
521 old := c.state
522 if old == nil {
523 c.state = &State{}
524 c.state.init()
525 } else {
526 c.state = old.DeepCopy()
527 }
528 defer func() {
529 c.state = old
530 }()
531
532 operation = walkPlan
533 }
534
535 // Setup our diff
536 c.diffLock.Lock()
537 c.diff = new(Diff)
538 c.diff.init()
539 c.diffLock.Unlock()
540
541 // Build the graph.
542 graphType := GraphTypePlan
543 if c.destroy {
544 graphType = GraphTypePlanDestroy
545 }
546 graph, err := c.Graph(graphType, nil)
547 if err != nil {
548 return nil, err
549 }
550
551 // Do the walk
552 walker, err := c.walk(graph, graph, operation)
553 if err != nil {
554 return nil, err
555 }
556 p.Diff = c.diff
557
558 // If this is true, it means we're running unit tests. In this case,
559 // we perform a deep copy just to ensure that all context tests also
560 // test that a diff is copy-able. This will panic if it fails. This
561 // is enabled during unit tests.
562 //
563 // This should never be true during production usage, but even if it is,
564 // it can't do any real harm.
565 if contextTestDeepCopyOnPlan {
566 p.Diff.DeepCopy()
567 }
568
569 /*
570 // We don't do the reverification during the new destroy plan because
571 // it will use a different apply process.
572 if X_legacyGraph {
573 // Now that we have a diff, we can build the exact graph that Apply will use
574 // and catch any possible cycles during the Plan phase.
575 if _, err := c.Graph(GraphTypeLegacy, nil); err != nil {
576 return nil, err
577 }
578 }
579 */
580
581 var errs error
582 if len(walker.ValidationErrors) > 0 {
583 errs = multierror.Append(errs, walker.ValidationErrors...)
584 }
585 return p, errs
586}
587
588// Refresh goes through all the resources in the state and refreshes them
589// to their latest state. This will update the state that this context
590// works with, along with returning it.
591//
592// Even in the case an error is returned, the state may be returned and
593// will potentially be partially updated.
594func (c *Context) Refresh() (*State, error) {
595 defer c.acquireRun("refresh")()
596
597 // Copy our own state
598 c.state = c.state.DeepCopy()
599
600 // Build the graph.
601 graph, err := c.Graph(GraphTypeRefresh, nil)
602 if err != nil {
603 return nil, err
604 }
605
606 // Do the walk
607 if _, err := c.walk(graph, graph, walkRefresh); err != nil {
608 return nil, err
609 }
610
611 // Clean out any unused things
612 c.state.prune()
613
614 return c.state, nil
615}
616
617// Stop stops the running task.
618//
619// Stop will block until the task completes.
620func (c *Context) Stop() {
621 log.Printf("[WARN] terraform: Stop called, initiating interrupt sequence")
622
623 c.l.Lock()
624 defer c.l.Unlock()
625
626 // If we're running, then stop
627 if c.runContextCancel != nil {
628 log.Printf("[WARN] terraform: run context exists, stopping")
629
630 // Tell the hook we want to stop
631 c.sh.Stop()
632
633 // Stop the context
634 c.runContextCancel()
635 c.runContextCancel = nil
636 }
637
638 // Grab the condition var before we exit
639 if cond := c.runCond; cond != nil {
640 cond.Wait()
641 }
642
643 log.Printf("[WARN] terraform: stop complete")
644}
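Stop is designed to be called from another goroutine while an operation is running. A sketch of the common wiring, hooking it to SIGINT:

package example

import (
	"os"
	"os/signal"

	"github.com/hashicorp/terraform/terraform"
)

// stopOnInterrupt wires Stop to SIGINT. Stop blocks until the in-flight
// walk has wound down, so the goroutine returns only after a clean halt.
func stopOnInterrupt(ctx *terraform.Context) {
	sigCh := make(chan os.Signal, 1)
	signal.Notify(sigCh, os.Interrupt)
	go func() {
		<-sigCh
		ctx.Stop()
	}()
}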
645
646// Validate validates the configuration and returns any warnings or errors.
647func (c *Context) Validate() ([]string, []error) {
648 defer c.acquireRun("validate")()
649
650 var errs error
651
652 // Validate the configuration itself
653 if err := c.module.Validate(); err != nil {
654 errs = multierror.Append(errs, err)
655 }
656
657 // This only needs to be done for the root module, since inter-module
658 // variables are validated in the module tree.
659 if config := c.module.Config(); config != nil {
660 // Validate the user variables
661 if err := smcUserVariables(config, c.variables); len(err) > 0 {
662 errs = multierror.Append(errs, err...)
663 }
664 }
665
666 // If we have errors at this point, the graphing has no chance,
667 // so just bail early.
668 if errs != nil {
669 return nil, []error{errs}
670 }
671
672 // Build the graph so we can walk it and run Validate on nodes.
673 // We also validate the graph generated here, but this graph doesn't
674 // necessarily match the graph that Plan will generate, so we'll validate the
675 // graph again later after Planning.
676 graph, err := c.Graph(GraphTypeValidate, nil)
677 if err != nil {
678 return nil, []error{err}
679 }
680
681 // Walk
682 walker, err := c.walk(graph, graph, walkValidate)
683 if err != nil {
684 return nil, multierror.Append(errs, err).Errors
685 }
686
687 // Return the result
688 rerrs := multierror.Append(errs, walker.ValidationErrors...)
689
690 sort.Strings(walker.ValidationWarnings)
691 sort.Slice(rerrs.Errors, func(i, j int) bool {
692 return rerrs.Errors[i].Error() < rerrs.Errors[j].Error()
693 })
694
695 return walker.ValidationWarnings, rerrs.Errors
696}
697
698// Module returns the module tree associated with this context.
699func (c *Context) Module() *module.Tree {
700 return c.module
701}
702
703// Variables will return the mapping of variables that were defined
704// for this Context. If Input was called, this mapping may be different
705// than what was given.
706func (c *Context) Variables() map[string]interface{} {
707 return c.variables
708}
709
710// SetVariable sets a variable after a context has already been built.
711func (c *Context) SetVariable(k string, v interface{}) {
712 c.variables[k] = v
713}
714
715func (c *Context) acquireRun(phase string) func() {
716 // With the run lock held, grab the context lock to make changes
717 // to the run context.
718 c.l.Lock()
719 defer c.l.Unlock()
720
721 // Wait until we're no longer running
722 for c.runCond != nil {
723 c.runCond.Wait()
724 }
725
726 // Build our lock
727 c.runCond = sync.NewCond(&c.l)
728
729 // Setup debugging
730 dbug.SetPhase(phase)
731
732 // Create a new run context
733 c.runContext, c.runContextCancel = context.WithCancel(context.Background())
734
735 // Reset the stop hook so we're not stopped
736 c.sh.Reset()
737
738 // Reset the shadow errors
739 c.shadowErr = nil
740
741 return c.releaseRun
742}
743
744func (c *Context) releaseRun() {
745 // Grab the context lock so that we can make modifications to fields
746 c.l.Lock()
747 defer c.l.Unlock()
748
749 // setting the phase to "INVALID" lets us easily detect if we have
750 // operations happening outside of a run, or if we missed setting the proper
751 // phase
752 dbug.SetPhase("INVALID")
753
754 // End our run. We check if runContext is non-nil because it can be
755 // set to nil if it was cancelled via Stop()
756 if c.runContextCancel != nil {
757 c.runContextCancel()
758 }
759
760 // Unlock all waiting our condition
761 cond := c.runCond
762 c.runCond = nil
763 cond.Broadcast()
764
765 // Unset the context
766 c.runContext = nil
767}
768
769func (c *Context) walk(
770 graph, shadow *Graph, operation walkOperation) (*ContextGraphWalker, error) {
771 // Keep track of the "real" context which is the context that does
772 // the real work: talking to real providers, modifying real state, etc.
773 realCtx := c
774
775 // If we don't want shadowing, remove it
776 if !experiment.Enabled(experiment.X_shadow) {
777 shadow = nil
778 }
779
780 // Just log this so we can see it in a debug log
781 if !c.shadow {
782 log.Printf("[WARN] terraform: shadow graph disabled")
783 shadow = nil
784 }
785
786 // If we have a shadow graph, walk that as well
787 var shadowCtx *Context
788 var shadowCloser Shadow
789 if shadow != nil {
790 // Build the shadow context. In the process, override the real context
791 // with the one that is wrapped so that the shadow context can verify
792 // the results of the real.
793 realCtx, shadowCtx, shadowCloser = newShadowContext(c)
794 }
795
796 log.Printf("[DEBUG] Starting graph walk: %s", operation.String())
797
798 walker := &ContextGraphWalker{
799 Context: realCtx,
800 Operation: operation,
801 StopContext: c.runContext,
802 }
803
804 // Watch for a stop so we can call the provider Stop() API.
805 watchStop, watchWait := c.watchStop(walker)
806
807 // Walk the real graph, this will block until it completes
808 realErr := graph.Walk(walker)
809
810 // Close the channel so the watcher stops, and wait for it to return.
811 close(watchStop)
812 <-watchWait
813
814 // If we have a shadow graph and we interrupted the real graph, then
815 // we just close the shadow and never verify it. It is non-trivial to
816 // recreate the exact execution state up until an interruption so this
817 // isn't supported with shadows at the moment.
818 if shadowCloser != nil && c.sh.Stopped() {
819 // Ignore the error result, there is nothing we could care about
820 shadowCloser.CloseShadow()
821
822 // Set it to nil so we don't do anything
823 shadowCloser = nil
824 }
825
826 // If we have a shadow graph, wait for that to complete.
827 if shadowCloser != nil {
828 // Build the graph walker for the shadow. We also wrap this in
829 // a panicwrap so that panics are captured. For the shadow graph,
830 // we just want panics to be normal errors rather than to crash
831 // Terraform.
832 shadowWalker := GraphWalkerPanicwrap(&ContextGraphWalker{
833 Context: shadowCtx,
834 Operation: operation,
835 })
836
837 // Kick off the shadow walk. This will block on any operations
838 // on the real walk so it is fine to start first.
839 log.Printf("[INFO] Starting shadow graph walk: %s", operation.String())
840 shadowCh := make(chan error)
841 go func() {
842 shadowCh <- shadow.Walk(shadowWalker)
843 }()
844
845 // Notify the shadow that we're done
846 if err := shadowCloser.CloseShadow(); err != nil {
847 c.shadowErr = multierror.Append(c.shadowErr, err)
848 }
849
850 // Wait for the walk to end
851 log.Printf("[DEBUG] Waiting for shadow graph to complete...")
852 shadowWalkErr := <-shadowCh
853
854 // Get any shadow errors
855 if err := shadowCloser.ShadowError(); err != nil {
856 c.shadowErr = multierror.Append(c.shadowErr, err)
857 }
858
859 // Verify the contexts (compare)
860 if err := shadowContextVerify(realCtx, shadowCtx); err != nil {
861 c.shadowErr = multierror.Append(c.shadowErr, err)
862 }
863
864 // At this point, if we're supposed to fail on error, then
865 // we PANIC. Some tests just verify that there is an error,
866 // so simply appending it to realErr and returning could hide
867 // shadow problems.
868 //
869 // This must be done BEFORE appending shadowWalkErr since the
870 // shadowWalkErr may include expected errors.
871 //
872 // We only do this if we don't have a real error. In the case of
873 // a real error, we can't guarantee what nodes were and weren't
874 // traversed in parallel scenarios so we can't guarantee no
875 // shadow errors.
876 if c.shadowErr != nil && contextFailOnShadowError && realErr == nil {
877 panic(multierror.Prefix(c.shadowErr, "shadow graph:"))
878 }
879
880 // Now, if we have a walk error, we append that through
881 if shadowWalkErr != nil {
882 c.shadowErr = multierror.Append(c.shadowErr, shadowWalkErr)
883 }
884
885 if c.shadowErr == nil {
886 log.Printf("[INFO] Shadow graph success!")
887 } else {
888 log.Printf("[ERROR] Shadow graph error: %s", c.shadowErr)
889
890 // If we're supposed to fail on shadow errors, then report it
891 if contextFailOnShadowError {
892 realErr = multierror.Append(realErr, multierror.Prefix(
893 c.shadowErr, "shadow graph:"))
894 }
895 }
896 }
897
898 return walker, realErr
899}
900
901// watchStop immediately returns a `stop` and a `wait` chan after dispatching
902// the watchStop goroutine. This will watch the runContext for cancellation and
903// stop the providers accordingly. When the watch is no longer needed, the
904// `stop` chan should be closed before waiting on the `wait` chan.
905// The `wait` chan is important, because without synchronizing with the end of
906// the watchStop goroutine, the runContext may also be closed during the select,
907// incorrectly causing providers to be stopped. Even if the graph walk is done
908// at that point, stopping a provider permanently cancels its StopContext which
909// can cause later actions to fail.
910func (c *Context) watchStop(walker *ContextGraphWalker) (chan struct{}, <-chan struct{}) {
911 stop := make(chan struct{})
912 wait := make(chan struct{})
913
914 // get the runContext cancellation channel now, because releaseRun will
915 // write to the runContext field.
916 done := c.runContext.Done()
917
918 go func() {
919 defer close(wait)
920 // Wait for a stop or completion
921 select {
922 case <-done:
923 // done means the context was canceled, so we need to try and stop
924 // providers.
925 case <-stop:
926 // our own stop channel was closed.
927 return
928 }
929
930 // If we're here, we're stopped, trigger the call.
931
932 {
933 // Copy the providers so that a misbehaved blocking Stop doesn't
934 // completely hang Terraform.
935 walker.providerLock.Lock()
936 ps := make([]ResourceProvider, 0, len(walker.providerCache))
937 for _, p := range walker.providerCache {
938 ps = append(ps, p)
939 }
940 defer walker.providerLock.Unlock()
941
942 for _, p := range ps {
943 // We ignore the error for now since there isn't any reasonable
944 // action to take if there is an error here, since the stop is still
945 // advisory: Terraform will exit once the graph node completes.
946 p.Stop()
947 }
948 }
949
950 {
951 // Call stop on all the provisioners
952 walker.provisionerLock.Lock()
953 ps := make([]ResourceProvisioner, 0, len(walker.provisionerCache))
954 for _, p := range walker.provisionerCache {
955 ps = append(ps, p)
956 }
957 defer walker.provisionerLock.Unlock()
958
959 for _, p := range ps {
960 // We ignore the error for now since there isn't any reasonable
961 // action to take if there is an error here, since the stop is still
962 // advisory: Terraform will exit once the graph node completes.
963 p.Stop()
964 }
965 }
966 }()
967
968 return stop, wait
969}
970
971// parseVariableAsHCL parses the value of a single variable as would have been specified
972// on the command line via -var or in an environment variable named TF_VAR_x, where x is
973// the name of the variable. In order to get around the restriction of HCL requiring a
974// top level object, we prepend a sentinel key, decode the user-specified value as its
975// value and pull the value back out of the resulting map.
976func parseVariableAsHCL(name string, input string, targetType config.VariableType) (interface{}, error) {
977 // expecting a string so don't decode anything, just strip quotes
978 if targetType == config.VariableTypeString {
979 return strings.Trim(input, `"`), nil
980 }
981
982 // return empty types
983 if strings.TrimSpace(input) == "" {
984 switch targetType {
985 case config.VariableTypeList:
986 return []interface{}{}, nil
987 case config.VariableTypeMap:
988 return make(map[string]interface{}), nil
989 }
990 }
991
992 const sentinelValue = "SENTINEL_TERRAFORM_VAR_OVERRIDE_KEY"
993 inputWithSentinel := fmt.Sprintf("%s = %s", sentinelValue, input)
994
995 var decoded map[string]interface{}
996 err := hcl.Decode(&decoded, inputWithSentinel)
997 if err != nil {
998 return nil, fmt.Errorf("Cannot parse value for variable %s (%q) as valid HCL: %s", name, input, err)
999 }
1000
1001 if len(decoded) != 1 {
1002 return nil, fmt.Errorf("Cannot parse value for variable %s (%q) as valid HCL. Only one value may be specified.", name, input)
1003 }
1004
1005 parsedValue, ok := decoded[sentinelValue]
1006 if !ok {
1007 return nil, fmt.Errorf("Cannot parse value for variable %s (%q) as valid HCL. One value must be specified.", name, input)
1008 }
1009
1010 switch targetType {
1011 case config.VariableTypeList:
1012 return parsedValue, nil
1013 case config.VariableTypeMap:
1014 if list, ok := parsedValue.([]map[string]interface{}); ok {
1015 return list[0], nil
1016 }
1017
1018 return nil, fmt.Errorf("Cannot parse value for variable %s (%q) as valid HCL. One value must be specified.", name, input)
1019 default:
1020 panic(fmt.Errorf("unknown type %s", targetType.Printable()))
1021 }
1022}
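The sentinel trick is easiest to see in isolation. A standalone sketch of what parseVariableAsHCL does for a map-typed value, using the same hcl.Decode call; note that HCL decodes a nested object as []map[string]interface{}, which is why the function above indexes list[0]:

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl"
)

func main() {
	// Wrap the raw value so HCL sees a top-level assignment, then unwrap.
	input := `{Name = "web", Env = "prod"}`
	wrapped := fmt.Sprintf("SENTINEL_TERRAFORM_VAR_OVERRIDE_KEY = %s", input)

	var decoded map[string]interface{}
	if err := hcl.Decode(&decoded, wrapped); err != nil {
		log.Fatal(err)
	}

	// Nested objects come back as []map[string]interface{}.
	m := decoded["SENTINEL_TERRAFORM_VAR_OVERRIDE_KEY"].([]map[string]interface{})[0]
	fmt.Println(m["Name"]) // web
}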
diff --git a/vendor/github.com/hashicorp/terraform/terraform/context_components.go b/vendor/github.com/hashicorp/terraform/terraform/context_components.go
new file mode 100644
index 0000000..6f50744
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/context_components.go
@@ -0,0 +1,65 @@
1package terraform
2
3import (
4 "fmt"
5)
6
7// contextComponentFactory is the interface that Context uses
8// to initialize various components such as providers and provisioners.
9// This factory gets more information than the raw maps used to initialize
10// a Context. This information is used for debugging.
11type contextComponentFactory interface {
12 // ResourceProvider creates a new ResourceProvider with the given
13 // type. The "uid" is a unique identifier for this provider being
14 // initialized that can be used for internal tracking.
15 ResourceProvider(typ, uid string) (ResourceProvider, error)
16 ResourceProviders() []string
17
18 // ResourceProvisioner creates a new ResourceProvisioner with the
19 // given type. The "uid" is a unique identifier for this provisioner
20 // being initialized that can be used for internal tracking.
21 ResourceProvisioner(typ, uid string) (ResourceProvisioner, error)
22 ResourceProvisioners() []string
23}
24
25// basicComponentFactory just calls a factory from a map directly.
26type basicComponentFactory struct {
27 providers map[string]ResourceProviderFactory
28 provisioners map[string]ResourceProvisionerFactory
29}
30
31func (c *basicComponentFactory) ResourceProviders() []string {
32 result := make([]string, 0, len(c.providers))
33 for k := range c.providers {
34 result = append(result, k)
35 }
36
37 return result
38}
39
40func (c *basicComponentFactory) ResourceProvisioners() []string {
41 result := make([]string, 0, len(c.provisioners))
42 for k := range c.provisioners {
43 result = append(result, k)
44 }
45
46 return result
47}
48
49func (c *basicComponentFactory) ResourceProvider(typ, uid string) (ResourceProvider, error) {
50 f, ok := c.providers[typ]
51 if !ok {
52 return nil, fmt.Errorf("unknown provider %q", typ)
53 }
54
55 return f()
56}
57
58func (c *basicComponentFactory) ResourceProvisioner(typ, uid string) (ResourceProvisioner, error) {
59 f, ok := c.provisioners[typ]
60 if !ok {
61 return nil, fmt.Errorf("unknown provisioner %q", typ)
62 }
63
64 return f()
65}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/context_graph_type.go b/vendor/github.com/hashicorp/terraform/terraform/context_graph_type.go
new file mode 100644
index 0000000..084f010
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/context_graph_type.go
@@ -0,0 +1,32 @@
1package terraform
2
3//go:generate stringer -type=GraphType context_graph_type.go
4
5// GraphType is an enum of the type of graph to create with a Context.
6// The values of the constants may change so they shouldn't be depended on;
7// always use the constant name.
8type GraphType byte
9
10const (
11 GraphTypeInvalid GraphType = 0
12 GraphTypeLegacy GraphType = iota
13 GraphTypeRefresh
14 GraphTypePlan
15 GraphTypePlanDestroy
16 GraphTypeApply
17 GraphTypeInput
18 GraphTypeValidate
19)
20
21// GraphTypeMap is a mapping of human-readable string to GraphType. This
22// is useful as the mechanism for human input for configurable
23// graph types.
24var GraphTypeMap = map[string]GraphType{
25 "apply": GraphTypeApply,
26 "input": GraphTypeInput,
27 "plan": GraphTypePlan,
28 "plan-destroy": GraphTypePlanDestroy,
29 "refresh": GraphTypeRefresh,
30 "legacy": GraphTypeLegacy,
31 "validate": GraphTypeValidate,
32}
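
Since the constant values are not stable, callers translating user input should go through GraphTypeMap. A minimal sketch follows; parseGraphType is a hypothetical helper, not part of this file:

// parseGraphType is a hypothetical helper (assumes "fmt" and "strings"
// are imported) showing the intended lookup path for user input.
func parseGraphType(s string) (GraphType, error) {
	t, ok := GraphTypeMap[strings.ToLower(s)]
	if !ok {
		return GraphTypeInvalid, fmt.Errorf("unknown graph type %q", s)
	}
	return t, nil
}
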
diff --git a/vendor/github.com/hashicorp/terraform/terraform/context_import.go b/vendor/github.com/hashicorp/terraform/terraform/context_import.go
new file mode 100644
index 0000000..f1d5776
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/context_import.go
@@ -0,0 +1,77 @@
1package terraform
2
3import (
4 "github.com/hashicorp/terraform/config/module"
5)
6
7// ImportOpts are used as the configuration for Import.
8type ImportOpts struct {
9 // Targets are the targets to import
10 Targets []*ImportTarget
11
12 // Module is optional, and specifies a config module that is loaded
13 // into the graph and evaluated. The use case for this is to provide
14 // provider configuration.
15 Module *module.Tree
16}
17
18// ImportTarget is a single resource to import.
19type ImportTarget struct {
20 // Addr is the full resource address of the resource to import.
21 // Example: "module.foo.aws_instance.bar"
22 Addr string
23
24 // ID is the ID of the resource to import. This is resource-specific.
25 ID string
26
27 // Provider is the optional name of the provider to use for this import.
28 Provider string
29}
30
31// Import takes already-created external resources and brings them
32// under Terraform management. Import requires the exact type, name, and ID
33// of the resources to import.
34//
35// This operation is idempotent. If the requested resource is already
36// imported, no changes are made to the state.
37//
38// Further, this operation also gracefully handles partial state. If during
39// an import there is a failure, all previously imported resources remain
40// imported.
41func (c *Context) Import(opts *ImportOpts) (*State, error) {
42 // Hold a lock since we can modify our own state here
43 defer c.acquireRun("import")()
44
45 // Copy our own state
46 c.state = c.state.DeepCopy()
47
48 // If no module is given, default to the module configured with
49 // the Context.
50 module := opts.Module
51 if module == nil {
52 module = c.module
53 }
54
55 // Initialize our graph builder
56 builder := &ImportGraphBuilder{
57 ImportTargets: opts.Targets,
58 Module: module,
59 Providers: c.components.ResourceProviders(),
60 }
61
62 // Build the graph!
63 graph, err := builder.Build(RootModulePath)
64 if err != nil {
65 return c.state, err
66 }
67
68 // Walk it
69 if _, err := c.walk(graph, nil, walkImport); err != nil {
70 return c.state, err
71 }
72
73 // Clean the state
74 c.state.prune()
75
76 return c.state, nil
77}
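
A hedged sketch of a caller, assuming ctx is an already-constructed *Context whose providers can handle the aws_instance type; the ID shown is illustrative only:

// Hedged sketch: ctx is an already-constructed *Context whose providers
// can handle the aws_instance type.
func exampleImport(ctx *Context) (*State, error) {
	return ctx.Import(&ImportOpts{
		Targets: []*ImportTarget{
			{
				Addr: "aws_instance.web",    // full resource address
				ID:   "i-0123456789abcdef0", // provider-specific ID
			},
		},
		// Module omitted: the module configured on the Context is used.
	})
}
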
diff --git a/vendor/github.com/hashicorp/terraform/terraform/debug.go b/vendor/github.com/hashicorp/terraform/terraform/debug.go
new file mode 100644
index 0000000..265339f
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/debug.go
@@ -0,0 +1,523 @@
1package terraform
2
3import (
4 "archive/tar"
5 "bytes"
6 "compress/gzip"
7 "encoding/json"
8 "fmt"
9 "io"
10 "os"
11 "path/filepath"
12 "sync"
13 "time"
14)
15
16// dbug is the global handler for writing the debug archive. All methods
17// are safe to call concurrently, including on the nil value. Setting dbug
18// to nil disables writing the debug archive.
19var dbug *debugInfo
20
21// SetDebugInfo initializes the debug handler with a backing file in the
22// provided directory. This must be called before any other terraform package
23// operations or not at all. Once this is called, CloseDebugInfo should be
24// called before program exit.
25func SetDebugInfo(path string) error {
26 if os.Getenv("TF_DEBUG") == "" {
27 return nil
28 }
29
30 di, err := newDebugInfoFile(path)
31 if err != nil {
32 return err
33 }
34
35 dbug = di
36 return nil
37}
38
39// CloseDebugInfo is the exported interface to Close the debug info handler.
40// The debug handler needs to be closed before program exit, so we export this
41// function to be deferred in the appropriate entrypoint for our executable.
42func CloseDebugInfo() error {
43 return dbug.Close()
44}
45
46// newDebugInfoFile initializes the global debug handler with a backing file in
47// the provided directory.
48func newDebugInfoFile(dir string) (*debugInfo, error) {
49 err := os.MkdirAll(dir, 0755)
50 if err != nil {
51 return nil, err
52 }
53
54 // FIXME: not guaranteed unique, but good enough for now
55 name := fmt.Sprintf("debug-%s", time.Now().Format("2006-01-02-15-04-05.999999999"))
56 archivePath := filepath.Join(dir, name+".tar.gz")
57
58 f, err := os.OpenFile(archivePath, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0666)
59 if err != nil {
60 return nil, err
61 }
62 return newDebugInfo(name, f)
63}
64
65// newDebugInfo initializes the global debug handler.
66func newDebugInfo(name string, w io.Writer) (*debugInfo, error) {
67 gz := gzip.NewWriter(w)
68
69 d := &debugInfo{
70 name: name,
71 w: w,
72 gz: gz,
73 tar: tar.NewWriter(gz),
74 }
75
76 // create the subdirs we need
77 topHdr := &tar.Header{
78 Name: name,
79 Typeflag: tar.TypeDir,
80 Mode: 0755,
81 }
82 graphsHdr := &tar.Header{
83 Name: name + "/graphs",
84 Typeflag: tar.TypeDir,
85 Mode: 0755,
86 }
87 err := d.tar.WriteHeader(topHdr)
88 // if the first WriteHeader fails, the second will too, so one check covers both
89 err = d.tar.WriteHeader(graphsHdr)
90 if err != nil {
91 return nil, err
92 }
93
94 return d, nil
95}
96
97// debugInfo provides various methods for writing debug information to a
98// central archive. The debugInfo struct should be initialized once before any
99// output is written, and Close should be called before program exit. All
100// exported methods on debugInfo will be safe for concurrent use. The exported
101// methods are also all safe to call on a nil pointer, so that there is no need
102// for conditional blocks before writing debug information.
103//
104// Each write operation done by the debugInfo will flush the gzip.Writer and
105// tar.Writer, and call Sync() or Flush() on the output writer as needed. This
106// ensures that as much data as possible is written to storage in the event of
107// a crash. The append format of the tar file, and the stream format of the
108// gzip writer allow easy recovery of the data in the event that the debugInfo
109// is not closed before program exit.
110type debugInfo struct {
111 sync.Mutex
112
113 // archive root directory name
114 name string
115
116 // current operation phase
117 phase string
118
119 // step is a monotonic counter recording the order of operations
120 step int
121
122 // flag to protect Close()
123 closed bool
124
125 // the debug log output is in a tar.gz format, written to the io.Writer w
126 w io.Writer
127 gz *gzip.Writer
128 tar *tar.Writer
129}
130
131// SetPhase sets the name of the current operational phase in the debug
132// handler. Each file in the archive will contain the name of the phase in
133// which it was created, e.g. "input", "apply", "plan", "refresh", "validate".
134func (d *debugInfo) SetPhase(phase string) {
135 if d == nil {
136 return
137 }
138 d.Lock()
139 defer d.Unlock()
140
141 d.phase = phase
142}
143
144// Close finalizes the debugInfo data in storage. This closes the
145// tar.Writer, the gzip.Writer, and, if the output writer is an io.Closer,
146// closes that as well.
147func (d *debugInfo) Close() error {
148 if d == nil {
149 return nil
150 }
151
152 d.Lock()
153 defer d.Unlock()
154
155 if d.closed {
156 return nil
157 }
158 d.closed = true
159
160 d.tar.Close()
161 d.gz.Close()
162
163 if c, ok := d.w.(io.Closer); ok {
164 return c.Close()
165 }
166 return nil
167}
168
169// debugBuffer is an io.WriteCloser that writes its contents to the debug
170// archive when closed.
171type debugBuffer struct {
172 debugInfo *debugInfo
173 name string
174 buf bytes.Buffer
175}
176
177func (b *debugBuffer) Write(d []byte) (int, error) {
178 return b.buf.Write(d)
179}
180
181func (b *debugBuffer) Close() error {
182 return b.debugInfo.WriteFile(b.name, b.buf.Bytes())
183}
184
185// the ioutil package only provides a no-op ReadCloser, so define our own no-op WriteCloser
186type nopWriteCloser struct{}
187
188func (nopWriteCloser) Write([]byte) (int, error) { return 0, nil }
189func (nopWriteCloser) Close() error { return nil }
190
191// NewFileWriter returns an io.WriteCloser that buffers its writes and
192// flushes them to the debug archive when closed.
193func (d *debugInfo) NewFileWriter(name string) io.WriteCloser {
194 if d == nil {
195 return nopWriteCloser{}
196 }
197
198 return &debugBuffer{
199 debugInfo: d,
200 name: name,
201 }
202}
203
204type syncer interface {
205 Sync() error
206}
207
208type flusher interface {
209 Flush() error
210}
211
212// Flush the tar.Writer and the gzip.Writer. Flush() or Sync() will be called
213// on the output writer if they are available.
214func (d *debugInfo) flush() {
215 d.tar.Flush()
216 d.gz.Flush()
217
218 if f, ok := d.w.(flusher); ok {
219 f.Flush()
220 }
221
222 if s, ok := d.w.(syncer); ok {
223 s.Sync()
224 }
225}
226
227// WriteFile writes data as a single file to the debug archive.
228func (d *debugInfo) WriteFile(name string, data []byte) error {
229 if d == nil {
230 return nil
231 }
232
233 d.Lock()
234 defer d.Unlock()
235 return d.writeFile(name, data)
236}
237
238func (d *debugInfo) writeFile(name string, data []byte) error {
239 defer d.flush()
240 path := fmt.Sprintf("%s/%d-%s-%s", d.name, d.step, d.phase, name)
241 d.step++
242
243 hdr := &tar.Header{
244 Name: path,
245 Mode: 0644,
246 Size: int64(len(data)),
247 }
248 err := d.tar.WriteHeader(hdr)
249 if err != nil {
250 return err
251 }
252
253 _, err = d.tar.Write(data)
254 return err
255}
256
257// DebugHook implements all methods of the terraform.Hook interface, and writes
258// the arguments to a file in the archive. When a suitable format for the
259// argument isn't available, the argument is encoded using json.Marshal. If the
260// debug handler is nil, all DebugHook methods are no-ops, so no time is spent in
261// marshaling the data structures.
262type DebugHook struct{}
263
264func (*DebugHook) PreApply(ii *InstanceInfo, is *InstanceState, id *InstanceDiff) (HookAction, error) {
265 if dbug == nil {
266 return HookActionContinue, nil
267 }
268
269 var buf bytes.Buffer
270
271 if ii != nil {
272 buf.WriteString(ii.HumanId() + "\n")
273 }
274
275 if is != nil {
276 buf.WriteString(is.String() + "\n")
277 }
278
279 idCopy, err := id.Copy()
280 if err != nil {
281 return HookActionContinue, err
282 }
283 js, err := json.MarshalIndent(idCopy, "", " ")
284 if err != nil {
285 return HookActionContinue, err
286 }
287 buf.Write(js)
288
289 dbug.WriteFile("hook-PreApply", buf.Bytes())
290
291 return HookActionContinue, nil
292}
293
294func (*DebugHook) PostApply(ii *InstanceInfo, is *InstanceState, err error) (HookAction, error) {
295 if dbug == nil {
296 return HookActionContinue, nil
297 }
298
299 var buf bytes.Buffer
300
301 if ii != nil {
302 buf.WriteString(ii.HumanId() + "\n")
303 }
304
305 if is != nil {
306 buf.WriteString(is.String() + "\n")
307 }
308
309 if err != nil {
310 buf.WriteString(err.Error())
311 }
312
313 dbug.WriteFile("hook-PostApply", buf.Bytes())
314
315 return HookActionContinue, nil
316}
317
318func (*DebugHook) PreDiff(ii *InstanceInfo, is *InstanceState) (HookAction, error) {
319 if dbug == nil {
320 return HookActionContinue, nil
321 }
322
323 var buf bytes.Buffer
324 if ii != nil {
325 buf.WriteString(ii.HumanId() + "\n")
326 }
327
328 if is != nil {
329 buf.WriteString(is.String())
330 buf.WriteString("\n")
331 }
332 dbug.WriteFile("hook-PreDiff", buf.Bytes())
333
334 return HookActionContinue, nil
335}
336
337func (*DebugHook) PostDiff(ii *InstanceInfo, id *InstanceDiff) (HookAction, error) {
338 if dbug == nil {
339 return HookActionContinue, nil
340 }
341
342 var buf bytes.Buffer
343 if ii != nil {
344 buf.WriteString(ii.HumanId() + "\n")
345 }
346
347 idCopy, err := id.Copy()
348 if err != nil {
349 return HookActionContinue, err
350 }
351 js, err := json.MarshalIndent(idCopy, "", " ")
352 if err != nil {
353 return HookActionContinue, err
354 }
355 buf.Write(js)
356
357 dbug.WriteFile("hook-PostDiff", buf.Bytes())
358
359 return HookActionContinue, nil
360}
361
362func (*DebugHook) PreProvisionResource(ii *InstanceInfo, is *InstanceState) (HookAction, error) {
363 if dbug == nil {
364 return HookActionContinue, nil
365 }
366
367 var buf bytes.Buffer
368 if ii != nil {
369 buf.WriteString(ii.HumanId() + "\n")
370 }
371
372 if is != nil {
373 buf.WriteString(is.String())
374 buf.WriteString("\n")
375 }
376 dbug.WriteFile("hook-PreProvisionResource", buf.Bytes())
377
378 return HookActionContinue, nil
379}
380
381func (*DebugHook) PostProvisionResource(ii *InstanceInfo, is *InstanceState) (HookAction, error) {
382 if dbug == nil {
383 return HookActionContinue, nil
384 }
385
386 var buf bytes.Buffer
387 if ii != nil {
388 buf.WriteString(ii.HumanId())
389 buf.WriteString("\n")
390 }
391
392 if is != nil {
393 buf.WriteString(is.String())
394 buf.WriteString("\n")
395 }
396 dbug.WriteFile("hook-PostProvisionResource", buf.Bytes())
397 return HookActionContinue, nil
398}
399
400func (*DebugHook) PreProvision(ii *InstanceInfo, s string) (HookAction, error) {
401 if dbug == nil {
402 return HookActionContinue, nil
403 }
404
405 var buf bytes.Buffer
406 if ii != nil {
407 buf.WriteString(ii.HumanId())
408 buf.WriteString("\n")
409 }
410 buf.WriteString(s + "\n")
411
412 dbug.WriteFile("hook-PreProvision", buf.Bytes())
413 return HookActionContinue, nil
414}
415
416func (*DebugHook) PostProvision(ii *InstanceInfo, s string, err error) (HookAction, error) {
417 if dbug == nil {
418 return HookActionContinue, nil
419 }
420
421 var buf bytes.Buffer
422 if ii != nil {
423 buf.WriteString(ii.HumanId() + "\n")
424 }
425 buf.WriteString(s + "\n")
426
427 dbug.WriteFile("hook-PostProvision", buf.Bytes())
428 return HookActionContinue, nil
429}
430
431func (*DebugHook) ProvisionOutput(ii *InstanceInfo, s1 string, s2 string) {
432 if dbug == nil {
433 return
434 }
435
436 var buf bytes.Buffer
437 if ii != nil {
438 buf.WriteString(ii.HumanId())
439 buf.WriteString("\n")
440 }
441 buf.WriteString(s1 + "\n")
442 buf.WriteString(s2 + "\n")
443
444 dbug.WriteFile("hook-ProvisionOutput", buf.Bytes())
445}
446
447func (*DebugHook) PreRefresh(ii *InstanceInfo, is *InstanceState) (HookAction, error) {
448 if dbug == nil {
449 return HookActionContinue, nil
450 }
451
452 var buf bytes.Buffer
453 if ii != nil {
454 buf.WriteString(ii.HumanId() + "\n")
455 }
456
457 if is != nil {
458 buf.WriteString(is.String())
459 buf.WriteString("\n")
460 }
461 dbug.WriteFile("hook-PreRefresh", buf.Bytes())
462 return HookActionContinue, nil
463}
464
465func (*DebugHook) PostRefresh(ii *InstanceInfo, is *InstanceState) (HookAction, error) {
466 if dbug == nil {
467 return HookActionContinue, nil
468 }
469
470 var buf bytes.Buffer
471 if ii != nil {
472 buf.WriteString(ii.HumanId())
473 buf.WriteString("\n")
474 }
475
476 if is != nil {
477 buf.WriteString(is.String())
478 buf.WriteString("\n")
479 }
480 dbug.WriteFile("hook-PostRefresh", buf.Bytes())
481 return HookActionContinue, nil
482}
483
484func (*DebugHook) PreImportState(ii *InstanceInfo, s string) (HookAction, error) {
485 if dbug == nil {
486 return HookActionContinue, nil
487 }
488
489 var buf bytes.Buffer
490 if ii != nil {
491 buf.WriteString(ii.HumanId())
492 buf.WriteString("\n")
493 }
494 buf.WriteString(s + "\n")
495
496 dbug.WriteFile("hook-PreImportState", buf.Bytes())
497 return HookActionContinue, nil
498}
499
500func (*DebugHook) PostImportState(ii *InstanceInfo, iss []*InstanceState) (HookAction, error) {
501 if dbug == nil {
502 return HookActionContinue, nil
503 }
504
505 var buf bytes.Buffer
506
507 if ii != nil {
508 buf.WriteString(ii.HumanId() + "\n")
509 }
510
511 for _, is := range iss {
512 if is != nil {
513 buf.WriteString(is.String() + "\n")
514 }
515 }
516 dbug.WriteFile("hook-PostImportState", buf.Bytes())
517 return HookActionContinue, nil
518}
519
520// skip logging this for now, since it could be huge
521func (*DebugHook) PostStateUpdate(*State) (HookAction, error) {
522 return HookActionContinue, nil
523}
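
Tying the pieces together, here is a hedged sketch of an entrypoint that enables the archive; recall that SetDebugInfo is a no-op unless TF_DEBUG is set in the environment:

package main

import (
	"log"

	"github.com/hashicorp/terraform/terraform"
)

func main() {
	// No-op unless TF_DEBUG is set in the environment.
	if err := terraform.SetDebugInfo("./debug"); err != nil {
		log.Fatal(err)
	}
	// Finalize the tar.gz archive before exit.
	defer terraform.CloseDebugInfo()

	// A &terraform.DebugHook{} appended to ContextOpts.Hooks would then
	// capture each hook invocation in the archive.
}
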
diff --git a/vendor/github.com/hashicorp/terraform/terraform/diff.go b/vendor/github.com/hashicorp/terraform/terraform/diff.go
new file mode 100644
index 0000000..a9fae6c
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/diff.go
@@ -0,0 +1,866 @@
1package terraform
2
3import (
4 "bufio"
5 "bytes"
6 "fmt"
7 "reflect"
8 "regexp"
9 "sort"
10 "strings"
11 "sync"
12
13 "github.com/mitchellh/copystructure"
14)
15
16// DiffChangeType is an enum with the kind of changes a diff has planned.
17type DiffChangeType byte
18
19const (
20 DiffInvalid DiffChangeType = iota
21 DiffNone
22 DiffCreate
23 DiffUpdate
24 DiffDestroy
25 DiffDestroyCreate
26)
27
28// multiVal matches the index key to a flatmapped set, list or map
29var multiVal = regexp.MustCompile(`\.(#|%)$`)
30
31// Diff tracks the changes that are necessary to apply a configuration
32// to an existing infrastructure.
33type Diff struct {
34 // Modules contains all the modules that have a diff
35 Modules []*ModuleDiff
36}
37
38// Prune cleans out unused structures in the diff without affecting
39// the behavior of the diff at all.
40//
41// This is not safe to call concurrently. This is safe to call on a
42// nil Diff.
43func (d *Diff) Prune() {
44 if d == nil {
45 return
46 }
47
48 // Prune all empty modules
49 newModules := make([]*ModuleDiff, 0, len(d.Modules))
50 for _, m := range d.Modules {
51 // If the module isn't empty, we keep it
52 if !m.Empty() {
53 newModules = append(newModules, m)
54 }
55 }
56 if len(newModules) == 0 {
57 newModules = nil
58 }
59 d.Modules = newModules
60}
61
62// AddModule adds the module with the given path to the diff.
63//
64// This should be the preferred method to add module diffs since it
65// allows us to optimize lookups later as well as control sorting.
66func (d *Diff) AddModule(path []string) *ModuleDiff {
67 m := &ModuleDiff{Path: path}
68 m.init()
69 d.Modules = append(d.Modules, m)
70 return m
71}
72
73// ModuleByPath is used to lookup the module diff for the given path.
74// This should be the preferred lookup mechanism as it allows for future
75// lookup optimizations.
76func (d *Diff) ModuleByPath(path []string) *ModuleDiff {
77 if d == nil {
78 return nil
79 }
80 for _, mod := range d.Modules {
81 if mod.Path == nil {
82 panic("missing module path")
83 }
84 if reflect.DeepEqual(mod.Path, path) {
85 return mod
86 }
87 }
88 return nil
89}
90
91// RootModule returns the ModuleState for the root module
92func (d *Diff) RootModule() *ModuleDiff {
93 root := d.ModuleByPath(rootModulePath)
94 if root == nil {
95 panic("missing root module")
96 }
97 return root
98}
99
100// Empty returns true if the diff has no changes.
101func (d *Diff) Empty() bool {
102 if d == nil {
103 return true
104 }
105
106 for _, m := range d.Modules {
107 if !m.Empty() {
108 return false
109 }
110 }
111
112 return true
113}
114
115// Equal compares two diffs for exact equality.
116//
117// This is different from the Same comparison that is supported which
118// checks for operation equality taking into account computed values. Equal
119// instead checks for exact equality.
120func (d *Diff) Equal(d2 *Diff) bool {
121 // If one is nil, they must both be nil
122 if d == nil || d2 == nil {
123 return d == d2
124 }
125
126 // Sort the modules
127 sort.Sort(moduleDiffSort(d.Modules))
128 sort.Sort(moduleDiffSort(d2.Modules))
129
130 // Copy since we have to modify the module destroy flag to false so
131 // we don't compare that. TODO: delete this when we get rid of the
132 // destroy flag on modules.
133 dCopy := d.DeepCopy()
134 d2Copy := d2.DeepCopy()
135 for _, m := range dCopy.Modules {
136 m.Destroy = false
137 }
138 for _, m := range d2Copy.Modules {
139 m.Destroy = false
140 }
141
142 // Use DeepEqual
143 return reflect.DeepEqual(dCopy, d2Copy)
144}
145
146// DeepCopy performs a deep copy of all parts of the Diff, making the
147// resulting Diff safe to use without modifying this one.
148func (d *Diff) DeepCopy() *Diff {
149 copy, err := copystructure.Config{Lock: true}.Copy(d)
150 if err != nil {
151 panic(err)
152 }
153
154 return copy.(*Diff)
155}
156
157func (d *Diff) String() string {
158 var buf bytes.Buffer
159
160 keys := make([]string, 0, len(d.Modules))
161 lookup := make(map[string]*ModuleDiff)
162 for _, m := range d.Modules {
163 key := fmt.Sprintf("module.%s", strings.Join(m.Path[1:], "."))
164 keys = append(keys, key)
165 lookup[key] = m
166 }
167 sort.Strings(keys)
168
169 for _, key := range keys {
170 m := lookup[key]
171 mStr := m.String()
172
173 // If we're the root module, we just write the output directly.
174 if reflect.DeepEqual(m.Path, rootModulePath) {
175 buf.WriteString(mStr + "\n")
176 continue
177 }
178
179 buf.WriteString(fmt.Sprintf("%s:\n", key))
180
181 s := bufio.NewScanner(strings.NewReader(mStr))
182 for s.Scan() {
183 buf.WriteString(fmt.Sprintf(" %s\n", s.Text()))
184 }
185 }
186
187 return strings.TrimSpace(buf.String())
188}
189
190func (d *Diff) init() {
191 if d.Modules == nil {
192 rootDiff := &ModuleDiff{Path: rootModulePath}
193 d.Modules = []*ModuleDiff{rootDiff}
194 }
195 for _, m := range d.Modules {
196 m.init()
197 }
198}
199
200// ModuleDiff tracks the differences between resources to apply within
201// a single module.
202type ModuleDiff struct {
203 Path []string
204 Resources map[string]*InstanceDiff
205 Destroy bool // Set only by the destroy plan
206}
207
208func (d *ModuleDiff) init() {
209 if d.Resources == nil {
210 d.Resources = make(map[string]*InstanceDiff)
211 }
212 for _, r := range d.Resources {
213 r.init()
214 }
215}
216
217// ChangeType returns the type of changes that the diff for this
218// module includes.
219//
220// At a module level, this will only be DiffNone, DiffUpdate, DiffDestroy, or
221// DiffCreate. If an instance within the module has a DiffDestroyCreate
222// then this will register as a DiffCreate for a module.
223func (d *ModuleDiff) ChangeType() DiffChangeType {
224 result := DiffNone
225 for _, r := range d.Resources {
226 change := r.ChangeType()
227 switch change {
228 case DiffCreate, DiffDestroy:
229 if result == DiffNone {
230 result = change
231 }
232 case DiffDestroyCreate, DiffUpdate:
233 result = DiffUpdate
234 }
235 }
236
237 return result
238}
239
240// Empty returns true if the diff has no changes within this module.
241func (d *ModuleDiff) Empty() bool {
242 if d.Destroy {
243 return false
244 }
245
246 if len(d.Resources) == 0 {
247 return true
248 }
249
250 for _, rd := range d.Resources {
251 if !rd.Empty() {
252 return false
253 }
254 }
255
256 return true
257}
258
259// Instances returns the instance diffs for the id given. This can return
260// multiple instance diffs if there are counts within the resource.
261func (d *ModuleDiff) Instances(id string) []*InstanceDiff {
262 var result []*InstanceDiff
263 for k, diff := range d.Resources {
264 if k == id || strings.HasPrefix(k, id+".") {
265 if !diff.Empty() {
266 result = append(result, diff)
267 }
268 }
269 }
270
271 return result
272}
273
274// IsRoot says whether or not this module diff is for the root module.
275func (d *ModuleDiff) IsRoot() bool {
276 return reflect.DeepEqual(d.Path, rootModulePath)
277}
278
279// String outputs the diff in a long but command-line friendly output
280// format that users can read to quickly inspect a diff.
281func (d *ModuleDiff) String() string {
282 var buf bytes.Buffer
283
284 names := make([]string, 0, len(d.Resources))
285 for name := range d.Resources {
286 names = append(names, name)
287 }
288 sort.Strings(names)
289
290 for _, name := range names {
291 rdiff := d.Resources[name]
292
293 crud := "UPDATE"
294 switch {
295 case rdiff.RequiresNew() && (rdiff.GetDestroy() || rdiff.GetDestroyTainted()):
296 crud = "DESTROY/CREATE"
297 case rdiff.GetDestroy() || rdiff.GetDestroyDeposed():
298 crud = "DESTROY"
299 case rdiff.RequiresNew():
300 crud = "CREATE"
301 }
302
303 extra := ""
304 if !rdiff.GetDestroy() && rdiff.GetDestroyDeposed() {
305 extra = " (deposed only)"
306 }
307
308 buf.WriteString(fmt.Sprintf(
309 "%s: %s%s\n",
310 crud,
311 name,
312 extra))
313
314 keyLen := 0
315 rdiffAttrs := rdiff.CopyAttributes()
316 keys := make([]string, 0, len(rdiffAttrs))
317 for key := range rdiffAttrs {
318 if key == "id" {
319 continue
320 }
321
322 keys = append(keys, key)
323 if len(key) > keyLen {
324 keyLen = len(key)
325 }
326 }
327 sort.Strings(keys)
328
329 for _, attrK := range keys {
330 attrDiff, _ := rdiff.GetAttribute(attrK)
331
332 v := attrDiff.New
333 u := attrDiff.Old
334 if attrDiff.NewComputed {
335 v = "<computed>"
336 }
337
338 if attrDiff.Sensitive {
339 u = "<sensitive>"
340 v = "<sensitive>"
341 }
342
343 updateMsg := ""
344 if attrDiff.RequiresNew {
345 updateMsg = " (forces new resource)"
346 } else if attrDiff.Sensitive {
347 updateMsg = " (attribute changed)"
348 }
349
350 buf.WriteString(fmt.Sprintf(
351 " %s:%s %#v => %#v%s\n",
352 attrK,
353 strings.Repeat(" ", keyLen-len(attrK)),
354 u,
355 v,
356 updateMsg))
357 }
358 }
359
360 return buf.String()
361}
362
363// InstanceDiff is the diff of a resource from some state to another.
364type InstanceDiff struct {
365 mu sync.Mutex
366 Attributes map[string]*ResourceAttrDiff
367 Destroy bool
368 DestroyDeposed bool
369 DestroyTainted bool
370
371 // Meta is a simple K/V map that is stored in a diff and persisted to
372 // plans but otherwise is completely ignored by Terraform core. It is
373// meant to be used for additional data a resource may want to pass through.
374 // The value here must only contain Go primitives and collections.
375 Meta map[string]interface{}
376}
377
378func (d *InstanceDiff) Lock() { d.mu.Lock() }
379func (d *InstanceDiff) Unlock() { d.mu.Unlock() }
380
381// ResourceAttrDiff is the diff of a single attribute of a resource.
382type ResourceAttrDiff struct {
383 Old string // Old Value
384 New string // New Value
385 NewComputed bool // True if new value is computed (unknown currently)
386 NewRemoved bool // True if this attribute is being removed
387 NewExtra interface{} // Extra information for the provider
388 RequiresNew bool // True if change requires new resource
389 Sensitive bool // True if the data should not be displayed in UI output
390 Type DiffAttrType
391}
392
393// Empty returns true if the diff for this attr is neutral
394func (d *ResourceAttrDiff) Empty() bool {
395 return d.Old == d.New && !d.NewComputed && !d.NewRemoved
396}
397
398func (d *ResourceAttrDiff) GoString() string {
399 return fmt.Sprintf("*%#v", *d)
400}
401
402// DiffAttrType is an enum type that says whether a resource attribute
403// diff is an input attribute (comes from the configuration) or an
404// output attribute (comes as a result of applying the configuration). An
405// example input would be "ami" for AWS and an example output would be
406// "private_ip".
407type DiffAttrType byte
408
409const (
410 DiffAttrUnknown DiffAttrType = iota
411 DiffAttrInput
412 DiffAttrOutput
413)
414
415func (d *InstanceDiff) init() {
416 if d.Attributes == nil {
417 d.Attributes = make(map[string]*ResourceAttrDiff)
418 }
419}
420
421func NewInstanceDiff() *InstanceDiff {
422 return &InstanceDiff{Attributes: make(map[string]*ResourceAttrDiff)}
423}
424
425func (d *InstanceDiff) Copy() (*InstanceDiff, error) {
426 if d == nil {
427 return nil, nil
428 }
429
430 dCopy, err := copystructure.Config{Lock: true}.Copy(d)
431 if err != nil {
432 return nil, err
433 }
434
435 return dCopy.(*InstanceDiff), nil
436}
437
438// ChangeType returns the DiffChangeType represented by the diff
439// for this single instance.
440func (d *InstanceDiff) ChangeType() DiffChangeType {
441 if d.Empty() {
442 return DiffNone
443 }
444
445 if d.RequiresNew() && (d.GetDestroy() || d.GetDestroyTainted()) {
446 return DiffDestroyCreate
447 }
448
449 if d.GetDestroy() || d.GetDestroyDeposed() {
450 return DiffDestroy
451 }
452
453 if d.RequiresNew() {
454 return DiffCreate
455 }
456
457 return DiffUpdate
458}
459
460// Empty returns true if this diff encapsulates no changes.
461func (d *InstanceDiff) Empty() bool {
462 if d == nil {
463 return true
464 }
465
466 d.mu.Lock()
467 defer d.mu.Unlock()
468 return !d.Destroy &&
469 !d.DestroyTainted &&
470 !d.DestroyDeposed &&
471 len(d.Attributes) == 0
472}
473
474// Equal compares two diffs for exact equality.
475//
476// This is different from the Same comparison that is supported which
477// checks for operation equality taking into account computed values. Equal
478// instead checks for exact equality.
479func (d *InstanceDiff) Equal(d2 *InstanceDiff) bool {
480 // If one is nil, they must both be nil
481 if d == nil || d2 == nil {
482 return d == d2
483 }
484
485 // Use DeepEqual
486 return reflect.DeepEqual(d, d2)
487}
488
489// DeepCopy performs a deep copy of all parts of the InstanceDiff
490func (d *InstanceDiff) DeepCopy() *InstanceDiff {
491 copy, err := copystructure.Config{Lock: true}.Copy(d)
492 if err != nil {
493 panic(err)
494 }
495
496 return copy.(*InstanceDiff)
497}
498
499func (d *InstanceDiff) GoString() string {
500 return fmt.Sprintf("*%#v", InstanceDiff{
501 Attributes: d.Attributes,
502 Destroy: d.Destroy,
503 DestroyTainted: d.DestroyTainted,
504 DestroyDeposed: d.DestroyDeposed,
505 })
506}
507
508// RequiresNew returns true if the diff requires the creation of a new
509// resource (implying the destruction of the old).
510func (d *InstanceDiff) RequiresNew() bool {
511 if d == nil {
512 return false
513 }
514
515 d.mu.Lock()
516 defer d.mu.Unlock()
517
518 return d.requiresNew()
519}
520
521func (d *InstanceDiff) requiresNew() bool {
522 if d == nil {
523 return false
524 }
525
526 if d.DestroyTainted {
527 return true
528 }
529
530 for _, rd := range d.Attributes {
531 if rd != nil && rd.RequiresNew {
532 return true
533 }
534 }
535
536 return false
537}
538
539func (d *InstanceDiff) GetDestroyDeposed() bool {
540 d.mu.Lock()
541 defer d.mu.Unlock()
542
543 return d.DestroyDeposed
544}
545
546func (d *InstanceDiff) SetDestroyDeposed(b bool) {
547 d.mu.Lock()
548 defer d.mu.Unlock()
549
550 d.DestroyDeposed = b
551}
552
553// These methods are properly locked for use anywhere in the terraform
554// package outside of other InstanceDiff methods.
555// TODO refactor the locking scheme
556func (d *InstanceDiff) SetTainted(b bool) {
557 d.mu.Lock()
558 defer d.mu.Unlock()
559
560 d.DestroyTainted = b
561}
562
563func (d *InstanceDiff) GetDestroyTainted() bool {
564 d.mu.Lock()
565 defer d.mu.Unlock()
566
567 return d.DestroyTainted
568}
569
570func (d *InstanceDiff) SetDestroy(b bool) {
571 d.mu.Lock()
572 defer d.mu.Unlock()
573
574 d.Destroy = b
575}
576
577func (d *InstanceDiff) GetDestroy() bool {
578 d.mu.Lock()
579 defer d.mu.Unlock()
580
581 return d.Destroy
582}
583
584func (d *InstanceDiff) SetAttribute(key string, attr *ResourceAttrDiff) {
585 d.mu.Lock()
586 defer d.mu.Unlock()
587
588 d.Attributes[key] = attr
589}
590
591func (d *InstanceDiff) DelAttribute(key string) {
592 d.mu.Lock()
593 defer d.mu.Unlock()
594
595 delete(d.Attributes, key)
596}
597
598func (d *InstanceDiff) GetAttribute(key string) (*ResourceAttrDiff, bool) {
599 d.mu.Lock()
600 defer d.mu.Unlock()
601
602 attr, ok := d.Attributes[key]
603 return attr, ok
604}
605func (d *InstanceDiff) GetAttributesLen() int {
606 d.mu.Lock()
607 defer d.mu.Unlock()
608
609 return len(d.Attributes)
610}
611
612// CopyAttributes safely copies the Attributes map.
613func (d *InstanceDiff) CopyAttributes() map[string]*ResourceAttrDiff {
614 d.mu.Lock()
615 defer d.mu.Unlock()
616
617 attrs := make(map[string]*ResourceAttrDiff)
618 for k, v := range d.Attributes {
619 attrs[k] = v
620 }
621
622 return attrs
623}
624
625// Same checks whether or not two InstanceDiffs are the "same". When
626// we say "same", it is not necessarily exactly equal. Instead, it is
627// just checking that the same attributes are changing, a destroy
628// isn't suddenly happening, etc.
629func (d *InstanceDiff) Same(d2 *InstanceDiff) (bool, string) {
630 // we can safely compare the pointers without a lock
631 switch {
632 case d == nil && d2 == nil:
633 return true, ""
634 case d == nil || d2 == nil:
635 return false, "one nil"
636 case d == d2:
637 return true, ""
638 }
639
640 d.mu.Lock()
641 defer d.mu.Unlock()
642
643 // If we're going from requiring new to NOT requiring new, then we have
644 // to see if all required news were computed. If so, it is allowed since
645 // computed may also mean "same value and therefore not new".
646 oldNew := d.requiresNew()
647 newNew := d2.RequiresNew()
648 if oldNew && !newNew {
649 oldNew = false
650
651 // This section builds a list of ignorable attributes for requiresNew
652// by removing any elements of collections going to zero elements.
653 // For collections going to zero, they may not exist at all in the
654 // new diff (and hence RequiresNew == false).
655 ignoreAttrs := make(map[string]struct{})
656 for k, diffOld := range d.Attributes {
657 if !strings.HasSuffix(k, ".%") && !strings.HasSuffix(k, ".#") {
658 continue
659 }
660
661 // This case is in here as a protection measure. The bug that this
662 // code originally fixed (GH-11349) didn't have to deal with computed
663 // so I'm not 100% sure what the correct behavior is. Best to leave
664 // the old behavior.
665 if diffOld.NewComputed {
666 continue
667 }
668
669 // We're looking for the case where a map goes to exactly 0.
670 if diffOld.New != "0" {
671 continue
672 }
673
674 // Found it! Ignore all of these. The prefix here is stripping
675 // off the "%" or "#" so it is just "k."
676 prefix := k[:len(k)-1]
677 for k2 := range d.Attributes {
678 if strings.HasPrefix(k2, prefix) {
679 ignoreAttrs[k2] = struct{}{}
680 }
681 }
682 }
683
684 for k, rd := range d.Attributes {
685 if _, ok := ignoreAttrs[k]; ok {
686 continue
687 }
688
689 // If the field is requires new and NOT computed, then what
690 // we have is a diff mismatch for sure. We set that the old
691 // diff does REQUIRE a ForceNew.
692 if rd != nil && rd.RequiresNew && !rd.NewComputed {
693 oldNew = true
694 break
695 }
696 }
697 }
698
699 if oldNew != newNew {
700 return false, fmt.Sprintf(
701 "diff RequiresNew; old: %t, new: %t", oldNew, newNew)
702 }
703
704 // Verify that destroy matches. The second boolean here allows us to
705 // have mismatching Destroy if we're moving from RequiresNew true
706 // to false above. Therefore, the second boolean will only pass if
707 // we're moving from Destroy: true to false as well.
708 if d.Destroy != d2.GetDestroy() && d.requiresNew() == oldNew {
709 return false, fmt.Sprintf(
710 "diff: Destroy; old: %t, new: %t", d.Destroy, d2.GetDestroy())
711 }
712
713 // Go through the old diff and make sure the new diff has all the
714 // same attributes. To start, build up the check map to be all the keys.
715 checkOld := make(map[string]struct{})
716 checkNew := make(map[string]struct{})
717 for k := range d.Attributes {
718 checkOld[k] = struct{}{}
719 }
720 for k := range d2.CopyAttributes() {
721 checkNew[k] = struct{}{}
722 }
723
724 // Make an ordered list so we are sure the approximated hashes are left
725 // to process at the end of the loop
726 keys := make([]string, 0, len(d.Attributes))
727 for k := range d.Attributes {
728 keys = append(keys, k)
729 }
730 sort.StringSlice(keys).Sort()
731
732 for _, k := range keys {
733 diffOld := d.Attributes[k]
734
735 if _, ok := checkOld[k]; !ok {
736 // We're not checking this key for whatever reason (see where
737 // check is modified).
738 continue
739 }
740
741 // Remove this key since we'll never hit it again
742 delete(checkOld, k)
743 delete(checkNew, k)
744
745 _, ok := d2.GetAttribute(k)
746 if !ok {
747 // If there's no new attribute, and the old diff expected the attribute
748 // to be removed, that's just fine.
749 if diffOld.NewRemoved {
750 continue
751 }
752
753 // If the last diff was a computed value then the absence of
754 // that value is allowed since it may mean the value ended up
755 // being the same.
756 if diffOld.NewComputed {
757 ok = true
758 }
759
760 // No exact match, but maybe this is a set containing computed
761 // values. So check if there is an approximate hash in the key
762 // and if so, try to match the key.
763 if strings.Contains(k, "~") {
764 parts := strings.Split(k, ".")
765 parts2 := append([]string(nil), parts...)
766
767 re := regexp.MustCompile(`^~\d+$`)
768 for i, part := range parts {
769 if re.MatchString(part) {
770 // we're going to consider this the base of a
771 // computed hash, and remove all longer matching fields
772 ok = true
773
774 parts2[i] = `\d+`
775 parts2 = parts2[:i+1]
776 break
777 }
778 }
779
780 re, err := regexp.Compile("^" + strings.Join(parts2, `\.`))
781 if err != nil {
782 return false, fmt.Sprintf("regexp failed to compile; err: %#v", err)
783 }
784
785 for k2 := range checkNew {
786 if re.MatchString(k2) {
787 delete(checkNew, k2)
788 }
789 }
790 }
791
792 // This is a little tricky, but when a diff contains a computed
793 // list, set, or map that can only be interpolated after the apply
794 // command has created the dependent resources, it could turn out
795 // that the result is actually the same as the existing state which
796 // would remove the key from the diff.
797 if diffOld.NewComputed && (strings.HasSuffix(k, ".#") || strings.HasSuffix(k, ".%")) {
798 ok = true
799 }
800
801 // Similarly, in a RequiresNew scenario, a list that shows up in the plan
802 // diff can disappear from the apply diff, which is calculated from an
803 // empty state.
804 if d.requiresNew() && (strings.HasSuffix(k, ".#") || strings.HasSuffix(k, ".%")) {
805 ok = true
806 }
807
808 if !ok {
809 return false, fmt.Sprintf("attribute mismatch: %s", k)
810 }
811 }
812
813 // search for the suffix of the base of a [computed] map, list or set.
814 match := multiVal.FindStringSubmatch(k)
815
816 if diffOld.NewComputed && len(match) == 2 {
817 matchLen := len(match[1])
818
819 // This is a computed list, set, or map, so remove any keys with
820 // this prefix from the check list.
821 kprefix := k[:len(k)-matchLen]
822 for k2 := range checkOld {
823 if strings.HasPrefix(k2, kprefix) {
824 delete(checkOld, k2)
825 }
826 }
827 for k2 := range checkNew {
828 if strings.HasPrefix(k2, kprefix) {
829 delete(checkNew, k2)
830 }
831 }
832 }
833
834 // TODO: check for the same value if not computed
835 }
836
837 // Check for leftover attributes
838 if len(checkNew) > 0 {
839 extras := make([]string, 0, len(checkNew))
840 for attr := range checkNew {
841 extras = append(extras, attr)
842 }
843 return false,
844 fmt.Sprintf("extra attributes: %s", strings.Join(extras, ", "))
845 }
846
847 return true, ""
848}
849
850// moduleDiffSort implements sort.Interface to sort module diffs by path.
851type moduleDiffSort []*ModuleDiff
852
853func (s moduleDiffSort) Len() int { return len(s) }
854func (s moduleDiffSort) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
855func (s moduleDiffSort) Less(i, j int) bool {
856 a := s[i]
857 b := s[j]
858
859 // If the lengths are different, then the shorter one always wins
860 if len(a.Path) != len(b.Path) {
861 return len(a.Path) < len(b.Path)
862 }
863
864 // Otherwise, compare lexically
865 return strings.Join(a.Path, ".") < strings.Join(b.Path, ".")
866}
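
A short in-package sketch of how these pieces classify a change: a single ForceNew attribute with no destroy flags yields DiffCreate:

// In-package sketch: classify a diff with one ForceNew attribute.
func exampleChangeType() DiffChangeType {
	d := NewInstanceDiff()
	d.SetAttribute("ami", &ResourceAttrDiff{
		Old:         "ami-111111",
		New:         "ami-222222",
		RequiresNew: true,
	})

	// No Destroy/DestroyTainted flags are set, so RequiresNew alone
	// classifies this as DiffCreate.
	return d.ChangeType()
}
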
diff --git a/vendor/github.com/hashicorp/terraform/terraform/edge_destroy.go b/vendor/github.com/hashicorp/terraform/terraform/edge_destroy.go
new file mode 100644
index 0000000..bc9d638
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/edge_destroy.go
@@ -0,0 +1,17 @@
1package terraform
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/terraform/dag"
7)
8
9// DestroyEdge is an edge that represents a standard "destroy" relationship:
10// Target depends on Source because Source is destroying.
11type DestroyEdge struct {
12 S, T dag.Vertex
13}
14
15func (e *DestroyEdge) Hashcode() interface{} { return fmt.Sprintf("%p-%p", e.S, e.T) }
16func (e *DestroyEdge) Source() dag.Vertex { return e.S }
17func (e *DestroyEdge) Target() dag.Vertex { return e.T }
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval.go b/vendor/github.com/hashicorp/terraform/terraform/eval.go
new file mode 100644
index 0000000..3cb088a
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval.go
@@ -0,0 +1,63 @@
1package terraform
2
3import (
4 "log"
5 "strings"
6)
7
8// EvalNode is the interface that must be implemented by graph nodes to
9// evaluate/execute.
10type EvalNode interface {
11 // Eval evaluates this node with the given context. Any inputs the
12 // node needs are carried as fields on the node itself rather than
13 // passed as arguments.
14 Eval(EvalContext) (interface{}, error)
15}
16
17// GraphNodeEvalable is the interface that graph nodes must implement
18// to enable evaluation.
19type GraphNodeEvalable interface {
20 EvalTree() EvalNode
21}
22
23// EvalEarlyExitError is a special error return value that can be returned
24// by eval nodes to signal an early exit.
25type EvalEarlyExitError struct{}
26
27func (EvalEarlyExitError) Error() string { return "early exit" }
28
29// Eval evaluates the given EvalNode with the given context, properly
30// evaluating all args in the correct order.
31func Eval(n EvalNode, ctx EvalContext) (interface{}, error) {
32 // Call the lower level eval which doesn't understand early exit,
33 // and if we early exit, it isn't an error.
34 result, err := EvalRaw(n, ctx)
35 if err != nil {
36 if _, ok := err.(EvalEarlyExitError); ok {
37 return nil, nil
38 }
39 }
40
41 return result, err
42}
43
44// EvalRaw is like Eval except that it returns all errors, even if they
45// signal something normal such as EvalEarlyExitError.
46func EvalRaw(n EvalNode, ctx EvalContext) (interface{}, error) {
47 path := "unknown"
48 if ctx != nil {
49 path = strings.Join(ctx.Path(), ".")
50 }
51
52 log.Printf("[DEBUG] %s: eval: %T", path, n)
53 output, err := n.Eval(ctx)
54 if err != nil {
55 if _, ok := err.(EvalEarlyExitError); ok {
56 log.Printf("[DEBUG] %s: eval: %T, err: %s", path, n, err)
57 } else {
58 log.Printf("[ERROR] %s: eval: %T, err: %s", path, n, err)
59 }
60 }
61
62 return output, err
63}
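
A minimal sketch of the early-exit contract, using a hypothetical in-package node that is not part of this file:

// haltNode is a hypothetical EvalNode that always requests an early exit.
type haltNode struct{}

func (haltNode) Eval(EvalContext) (interface{}, error) {
	return nil, EvalEarlyExitError{}
}

// Eval(haltNode{}, ctx) returns (nil, nil): the early exit is swallowed.
// EvalRaw(haltNode{}, ctx) returns the EvalEarlyExitError unchanged.
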
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_apply.go b/vendor/github.com/hashicorp/terraform/terraform/eval_apply.go
new file mode 100644
index 0000000..2f6a497
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_apply.go
@@ -0,0 +1,359 @@
1package terraform
2
3import (
4 "fmt"
5 "log"
6 "strconv"
7
8 "github.com/hashicorp/go-multierror"
9 "github.com/hashicorp/terraform/config"
10)
11
12// EvalApply is an EvalNode implementation that applies the computed diff
13// by calling the provider's Apply and recording the resulting state.
14type EvalApply struct {
15 Info *InstanceInfo
16 State **InstanceState
17 Diff **InstanceDiff
18 Provider *ResourceProvider
19 Output **InstanceState
20 CreateNew *bool
21 Error *error
22}
23
24// TODO: test
25func (n *EvalApply) Eval(ctx EvalContext) (interface{}, error) {
26 diff := *n.Diff
27 provider := *n.Provider
28 state := *n.State
29
30 // If we have no diff, we have nothing to do!
31 if diff.Empty() {
32 log.Printf(
33 "[DEBUG] apply: %s: diff is empty, doing nothing.", n.Info.Id)
34 return nil, nil
35 }
36
37 // Remove any output values from the diff
38 for k, ad := range diff.CopyAttributes() {
39 if ad.Type == DiffAttrOutput {
40 diff.DelAttribute(k)
41 }
42 }
43
44 // If the state is nil, make it non-nil
45 if state == nil {
46 state = new(InstanceState)
47 }
48 state.init()
49
50 // Flag if we're creating a new instance
51 if n.CreateNew != nil {
52 *n.CreateNew = (state.ID == "" && !diff.GetDestroy()) || diff.RequiresNew()
53 }
54
55 // With the completed diff, apply!
56 log.Printf("[DEBUG] apply: %s: executing Apply", n.Info.Id)
57 state, err := provider.Apply(n.Info, state, diff)
58 if state == nil {
59 state = new(InstanceState)
60 }
61 state.init()
62
63 // Force the "id" attribute to be our ID
64 if state.ID != "" {
65 state.Attributes["id"] = state.ID
66 }
67
68 // If the value is the unknown variable value, then it is an error.
69 // In this case we record the error and remove it from the state
70 for ak, av := range state.Attributes {
71 if av == config.UnknownVariableValue {
72 err = multierror.Append(err, fmt.Errorf(
73 "Attribute with unknown value: %s", ak))
74 delete(state.Attributes, ak)
75 }
76 }
77
78 // Write the final state
79 if n.Output != nil {
80 *n.Output = state
81 }
82
83 // If there was an error, we append it to our output error
84 // if we have one, otherwise we just return it.
85 if err != nil {
86 if n.Error != nil {
87 helpfulErr := fmt.Errorf("%s: %s", n.Info.Id, err.Error())
88 *n.Error = multierror.Append(*n.Error, helpfulErr)
89 } else {
90 return nil, err
91 }
92 }
93
94 return nil, nil
95}
96
97// EvalApplyPre is an EvalNode implementation that does the pre-Apply work
98type EvalApplyPre struct {
99 Info *InstanceInfo
100 State **InstanceState
101 Diff **InstanceDiff
102}
103
104// TODO: test
105func (n *EvalApplyPre) Eval(ctx EvalContext) (interface{}, error) {
106 state := *n.State
107 diff := *n.Diff
108
109 // If the state is nil, make it non-nil
110 if state == nil {
111 state = new(InstanceState)
112 }
113 state.init()
114
115 {
116 // Call pre-apply hook
117 err := ctx.Hook(func(h Hook) (HookAction, error) {
118 return h.PreApply(n.Info, state, diff)
119 })
120 if err != nil {
121 return nil, err
122 }
123 }
124
125 return nil, nil
126}
127
128// EvalApplyPost is an EvalNode implementation that does the post-Apply work
129type EvalApplyPost struct {
130 Info *InstanceInfo
131 State **InstanceState
132 Error *error
133}
134
135// TODO: test
136func (n *EvalApplyPost) Eval(ctx EvalContext) (interface{}, error) {
137 state := *n.State
138
139 {
140 // Call post-apply hook
141 err := ctx.Hook(func(h Hook) (HookAction, error) {
142 return h.PostApply(n.Info, state, *n.Error)
143 })
144 if err != nil {
145 return nil, err
146 }
147 }
148
149 return nil, *n.Error
150}
151
152// EvalApplyProvisioners is an EvalNode implementation that executes
153// the provisioners for a resource.
154//
155// TODO(mitchellh): This should probably be split up into a more fine-grained
156// ApplyProvisioner (single) that is looped over.
157type EvalApplyProvisioners struct {
158 Info *InstanceInfo
159 State **InstanceState
160 Resource *config.Resource
161 InterpResource *Resource
162 CreateNew *bool
163 Error *error
164
165 // When is the type of provisioner to run at this point
166 When config.ProvisionerWhen
167}
168
169// TODO: test
170func (n *EvalApplyProvisioners) Eval(ctx EvalContext) (interface{}, error) {
171 state := *n.State
172
173 if n.CreateNew != nil && !*n.CreateNew {
174 // If we're not creating a new resource, then don't run provisioners
175 return nil, nil
176 }
177
178 provs := n.filterProvisioners()
179 if len(provs) == 0 {
180 // We have no provisioners, so don't do anything
181 return nil, nil
182 }
183
184 // taint tells us whether to enable tainting.
185 taint := n.When == config.ProvisionerWhenCreate
186
187 if n.Error != nil && *n.Error != nil {
188 if taint {
189 state.Tainted = true
190 }
191
192 // An earlier step already failed, so just return
193 return nil, nil
194 }
195
196 {
197 // Call pre hook
198 err := ctx.Hook(func(h Hook) (HookAction, error) {
199 return h.PreProvisionResource(n.Info, state)
200 })
201 if err != nil {
202 return nil, err
203 }
204 }
205
206 // If there was an error, we append it to our output error
207 // if we have one, otherwise we just return it.
208 err := n.apply(ctx, provs)
209 if err != nil {
210 if taint {
211 state.Tainted = true
212 }
213
214 if n.Error != nil {
215 *n.Error = multierror.Append(*n.Error, err)
216 } else {
217 return nil, err
218 }
219 }
220
221 {
222 // Call post hook
223 err := ctx.Hook(func(h Hook) (HookAction, error) {
224 return h.PostProvisionResource(n.Info, state)
225 })
226 if err != nil {
227 return nil, err
228 }
229 }
230
231 return nil, nil
232}
233
234// filterProvisioners filters the provisioners on the resource to only
235// the provisioners specified by the "when" option.
236func (n *EvalApplyProvisioners) filterProvisioners() []*config.Provisioner {
237 // Fast path the zero case
238 if n.Resource == nil {
239 return nil
240 }
241
242 if len(n.Resource.Provisioners) == 0 {
243 return nil
244 }
245
246 result := make([]*config.Provisioner, 0, len(n.Resource.Provisioners))
247 for _, p := range n.Resource.Provisioners {
248 if p.When == n.When {
249 result = append(result, p)
250 }
251 }
252
253 return result
254}
255
256func (n *EvalApplyProvisioners) apply(ctx EvalContext, provs []*config.Provisioner) error {
257 state := *n.State
258
259 // Store the original connection info, restore later
260 origConnInfo := state.Ephemeral.ConnInfo
261 defer func() {
262 state.Ephemeral.ConnInfo = origConnInfo
263 }()
264
265 for _, prov := range provs {
266 // Get the provisioner
267 provisioner := ctx.Provisioner(prov.Type)
268
269 // Interpolate the provisioner config
270 provConfig, err := ctx.Interpolate(prov.RawConfig.Copy(), n.InterpResource)
271 if err != nil {
272 return err
273 }
274
275 // Interpolate the conn info, since it may contain variables
276 connInfo, err := ctx.Interpolate(prov.ConnInfo.Copy(), n.InterpResource)
277 if err != nil {
278 return err
279 }
280
281 // Merge the connection information
282 overlay := make(map[string]string)
283 if origConnInfo != nil {
284 for k, v := range origConnInfo {
285 overlay[k] = v
286 }
287 }
288 for k, v := range connInfo.Config {
289 switch vt := v.(type) {
290 case string:
291 overlay[k] = vt
292 case int64:
293 overlay[k] = strconv.FormatInt(vt, 10)
294 case int32:
295 overlay[k] = strconv.FormatInt(int64(vt), 10)
296 case int:
297 overlay[k] = strconv.FormatInt(int64(vt), 10)
298 case float32:
299 overlay[k] = strconv.FormatFloat(float64(vt), 'f', 3, 32)
300 case float64:
301 overlay[k] = strconv.FormatFloat(vt, 'f', 3, 64)
302 case bool:
303 overlay[k] = strconv.FormatBool(vt)
304 default:
305 overlay[k] = fmt.Sprintf("%v", vt)
306 }
307 }
308 state.Ephemeral.ConnInfo = overlay
309
310 {
311 // Call pre hook
312 err := ctx.Hook(func(h Hook) (HookAction, error) {
313 return h.PreProvision(n.Info, prov.Type)
314 })
315 if err != nil {
316 return err
317 }
318 }
319
320 // The output function
321 outputFn := func(msg string) {
322 ctx.Hook(func(h Hook) (HookAction, error) {
323 h.ProvisionOutput(n.Info, prov.Type, msg)
324 return HookActionContinue, nil
325 })
326 }
327
328 // Invoke the Provisioner
329 output := CallbackUIOutput{OutputFn: outputFn}
330 applyErr := provisioner.Apply(&output, state, provConfig)
331
332 // Call post hook
333 hookErr := ctx.Hook(func(h Hook) (HookAction, error) {
334 return h.PostProvision(n.Info, prov.Type, applyErr)
335 })
336
337 // Handle the error before we deal with the hook
338 if applyErr != nil {
339 // Determine failure behavior
340 switch prov.OnFailure {
341 case config.ProvisionerOnFailureContinue:
342 log.Printf(
343 "[INFO] apply: %s [%s]: error during provision, continue requested",
344 n.Info.Id, prov.Type)
345
346 case config.ProvisionerOnFailureFail:
347 return applyErr
348 }
349 }
350
351 // Deal with the hook
352 if hookErr != nil {
353 return hookErr
354 }
355 }
356
357 return nil
358
359}
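
The type switch above flattens mixed connection-info values into strings. A self-contained sketch of just that conversion:

package main

import (
	"fmt"
	"strconv"
)

func main() {
	raw := map[string]interface{}{"host": "10.0.0.5", "port": 22, "timeout": 30.5, "agent": true}
	overlay := make(map[string]string)
	for k, v := range raw {
		switch vt := v.(type) {
		case string:
			overlay[k] = vt
		case int:
			overlay[k] = strconv.FormatInt(int64(vt), 10)
		case float64:
			overlay[k] = strconv.FormatFloat(vt, 'f', 3, 64)
		case bool:
			overlay[k] = strconv.FormatBool(vt)
		default:
			overlay[k] = fmt.Sprintf("%v", vt)
		}
	}
	fmt.Println(overlay) // e.g. map[agent:true host:10.0.0.5 port:22 timeout:30.500]
}
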
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_check_prevent_destroy.go b/vendor/github.com/hashicorp/terraform/terraform/eval_check_prevent_destroy.go
new file mode 100644
index 0000000..715e79e
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_check_prevent_destroy.go
@@ -0,0 +1,38 @@
1package terraform
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/terraform/config"
7)
8
9// EvalCheckPreventDestroy is an EvalNode implementation that returns an
10// error if a resource has PreventDestroy configured and the diff
11// would destroy the resource.
12type EvalCheckPreventDestroy struct {
13 Resource *config.Resource
14 ResourceId string
15 Diff **InstanceDiff
16}
17
18func (n *EvalCheckPreventDestroy) Eval(ctx EvalContext) (interface{}, error) {
19 if n.Diff == nil || *n.Diff == nil || n.Resource == nil {
20 return nil, nil
21 }
22
23 diff := *n.Diff
24 preventDestroy := n.Resource.Lifecycle.PreventDestroy
25
26 if diff.GetDestroy() && preventDestroy {
27 resourceId := n.ResourceId
28 if resourceId == "" {
29 resourceId = n.Resource.Id()
30 }
31
32 return nil, fmt.Errorf(preventDestroyErrStr, resourceId)
33 }
34
35 return nil, nil
36}
37
38const preventDestroyErrStr = `%s: the plan would destroy this resource, but it currently has lifecycle.prevent_destroy set to true. To avoid this error and continue with the plan, either disable lifecycle.prevent_destroy or adjust the scope of the plan using the -target flag.`
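
A hedged in-package sketch of the check firing; the Lifecycle and PreventDestroy field names on config.Resource are assumptions taken from the config package:

// In-package sketch: the check fires for a destroy diff when
// prevent_destroy is set.
func examplePreventDestroy() error {
	d := &InstanceDiff{Destroy: true}
	node := &EvalCheckPreventDestroy{
		Resource: &config.Resource{
			Lifecycle: config.ResourceLifecycle{PreventDestroy: true},
		},
		ResourceId: "aws_instance.db", // avoids the Resource.Id() fallback
		Diff:       &d,
	}

	_, err := node.Eval(nil) // ctx is unused by this node
	return err               // non-nil, formatted from preventDestroyErrStr
}
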
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_context.go b/vendor/github.com/hashicorp/terraform/terraform/eval_context.go
new file mode 100644
index 0000000..a1f815b
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_context.go
@@ -0,0 +1,84 @@
1package terraform
2
3import (
4 "sync"
5
6 "github.com/hashicorp/terraform/config"
7)
8
9// EvalContext is the interface that is given to eval nodes to execute.
10type EvalContext interface {
11 // Stopped returns a channel that is closed when evaluation is stopped
12 // via Terraform.Context.Stop()
13 Stopped() <-chan struct{}
14
15 // Path is the current module path.
16 Path() []string
17
18 // Hook is used to call hook methods. The callback is called for each
19 // hook and should return the hook action to take and the error.
20 Hook(func(Hook) (HookAction, error)) error
21
22 // Input is the UIInput object for interacting with the UI.
23 Input() UIInput
24
25 // InitProvider initializes the provider with the given name and
26 // returns the implementation of the resource provider or an error.
27 //
28 // It is an error to initialize the same provider more than once.
29 InitProvider(string) (ResourceProvider, error)
30
31 // Provider gets the provider instance with the given name (already
32 // initialized) or returns nil if the provider isn't initialized.
33 Provider(string) ResourceProvider
34
35 // CloseProvider closes provider connections that aren't needed anymore.
36 CloseProvider(string) error
37
38 // ConfigureProvider configures the provider with the given
39 // configuration. This is a separate context call because this call
40 // is used to store the provider configuration for inheritance lookups
41 // with ParentProviderConfig().
42 ConfigureProvider(string, *ResourceConfig) error
43 SetProviderConfig(string, *ResourceConfig) error
44 ParentProviderConfig(string) *ResourceConfig
45
46 // ProviderInput and SetProviderInput are used to configure providers
47 // from user input.
48 ProviderInput(string) map[string]interface{}
49 SetProviderInput(string, map[string]interface{})
50
51 // InitProvisioner initializes the provisioner with the given name and
52 // returns the implementation of the resource provisioner or an error.
53 //
54 // It is an error to initialize the same provisioner more than once.
55 InitProvisioner(string) (ResourceProvisioner, error)
56
57 // Provisioner gets the provisioner instance with the given name (already
58 // initialized) or returns nil if the provisioner isn't initialized.
59 Provisioner(string) ResourceProvisioner
60
61 // CloseProvisioner closes provisioner connections that aren't needed
62 // anymore.
63 CloseProvisioner(string) error
64
65 // Interpolate takes the given raw configuration and completes
66 // the interpolations, returning the processed ResourceConfig.
67 //
68 // The resource argument is optional. If given, it is the resource
69 // that is currently being acted upon.
70 Interpolate(*config.RawConfig, *Resource) (*ResourceConfig, error)
71
72 // SetVariables sets the variables for the module within
73 // this context with the name n. This function call is additive:
74 // the second parameter is merged with any previous call.
75 SetVariables(string, map[string]interface{})
76
77 // Diff returns the global diff as well as the lock that should
78 // be used to modify that diff.
79 Diff() (*Diff, *sync.RWMutex)
80
81 // State returns the global state as well as the lock that should
82 // be used to modify that state.
83 State() (*State, *sync.RWMutex)
84}
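
The Hook helper is the method most eval nodes touch. A short in-package usage sketch, with info, state, and diff standing in for values an eval node already holds:

// In-package sketch of the Hook callback pattern as an eval node would
// use it.
func exampleHook(ctx EvalContext, info *InstanceInfo, state *InstanceState, diff *InstanceDiff) error {
	// A HookActionHalt from any hook surfaces as EvalEarlyExitError.
	return ctx.Hook(func(h Hook) (HookAction, error) {
		return h.PreApply(info, state, diff)
	})
}
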
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_context_builtin.go b/vendor/github.com/hashicorp/terraform/terraform/eval_context_builtin.go
new file mode 100644
index 0000000..3dcfb22
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_context_builtin.go
@@ -0,0 +1,347 @@
1package terraform
2
3import (
4 "context"
5 "fmt"
6 "log"
7 "strings"
8 "sync"
9
10 "github.com/hashicorp/terraform/config"
11)
12
13// BuiltinEvalContext is an EvalContext implementation that is used by
14// Terraform by default.
15type BuiltinEvalContext struct {
16 // StopContext is the context used to track whether we're complete
17 StopContext context.Context
18
19 // PathValue is the Path that this context is operating within.
20 PathValue []string
21
22 // The Interpolater settings below affect the interpolation of variables.
23 //
24 // InterpolaterVars holds the exact values for ${var.foo} references.
25 // The map is shared between all contexts and is a mapping of
26 // PATH to KEY to VALUE. Because it is shared by all contexts as well
27 // as the Interpolater itself, it is protected by InterpolaterVarLock
28 // which must be locked during any access to the map.
29 Interpolater *Interpolater
30 InterpolaterVars map[string]map[string]interface{}
31 InterpolaterVarLock *sync.Mutex
32
33 Components contextComponentFactory
34 Hooks []Hook
35 InputValue UIInput
36 ProviderCache map[string]ResourceProvider
37 ProviderConfigCache map[string]*ResourceConfig
38 ProviderInputConfig map[string]map[string]interface{}
39 ProviderLock *sync.Mutex
40 ProvisionerCache map[string]ResourceProvisioner
41 ProvisionerLock *sync.Mutex
42 DiffValue *Diff
43 DiffLock *sync.RWMutex
44 StateValue *State
45 StateLock *sync.RWMutex
46
47 once sync.Once
48}
49
50func (ctx *BuiltinEvalContext) Stopped() <-chan struct{} {
51 // StopContext is nil during tests; returning a nil channel makes callers block forever.
52 if ctx.StopContext == nil {
53 return nil
54 }
55
56 return ctx.StopContext.Done()
57}
58
59func (ctx *BuiltinEvalContext) Hook(fn func(Hook) (HookAction, error)) error {
60 for _, h := range ctx.Hooks {
61 action, err := fn(h)
62 if err != nil {
63 return err
64 }
65
66 switch action {
67 case HookActionContinue:
68 continue
69 case HookActionHalt:
70 // Return an early exit error to trigger an early exit
71 log.Printf("[WARN] Early exit triggered by hook: %T", h)
72 return EvalEarlyExitError{}
73 }
74 }
75
76 return nil
77}
78
79func (ctx *BuiltinEvalContext) Input() UIInput {
80 return ctx.InputValue
81}
82
83func (ctx *BuiltinEvalContext) InitProvider(n string) (ResourceProvider, error) {
84 ctx.once.Do(ctx.init)
85
86 // If we already initialized, it is an error
87 if p := ctx.Provider(n); p != nil {
88 return nil, fmt.Errorf("Provider '%s' already initialized", n)
89 }
90
91 // Warning: make sure to acquire these locks AFTER the call to Provider
92 // above, since it also acquires locks.
93 ctx.ProviderLock.Lock()
94 defer ctx.ProviderLock.Unlock()
95
96 providerPath := make([]string, len(ctx.Path())+1)
97 copy(providerPath, ctx.Path())
98 providerPath[len(providerPath)-1] = n
99 key := PathCacheKey(providerPath)
100
101 typeName := strings.SplitN(n, ".", 2)[0]
102 p, err := ctx.Components.ResourceProvider(typeName, key)
103 if err != nil {
104 return nil, err
105 }
106
107 ctx.ProviderCache[key] = p
108 return p, nil
109}
110
111func (ctx *BuiltinEvalContext) Provider(n string) ResourceProvider {
112 ctx.once.Do(ctx.init)
113
114 ctx.ProviderLock.Lock()
115 defer ctx.ProviderLock.Unlock()
116
117 providerPath := make([]string, len(ctx.Path())+1)
118 copy(providerPath, ctx.Path())
119 providerPath[len(providerPath)-1] = n
120
121 return ctx.ProviderCache[PathCacheKey(providerPath)]
122}
123
124func (ctx *BuiltinEvalContext) CloseProvider(n string) error {
125 ctx.once.Do(ctx.init)
126
127 ctx.ProviderLock.Lock()
128 defer ctx.ProviderLock.Unlock()
129
130 providerPath := make([]string, len(ctx.Path())+1)
131 copy(providerPath, ctx.Path())
132 providerPath[len(providerPath)-1] = n
133
134 var provider interface{}
135 provider = ctx.ProviderCache[PathCacheKey(providerPath)]
136 if provider != nil {
137 if p, ok := provider.(ResourceProviderCloser); ok {
138 delete(ctx.ProviderCache, PathCacheKey(providerPath))
139 return p.Close()
140 }
141 }
142
143 return nil
144}
145
146func (ctx *BuiltinEvalContext) ConfigureProvider(
147 n string, cfg *ResourceConfig) error {
148 p := ctx.Provider(n)
149 if p == nil {
150 return fmt.Errorf("Provider '%s' not initialized", n)
151 }
152
153 if err := ctx.SetProviderConfig(n, cfg); err != nil {
154 return err
155 }
156
157 return p.Configure(cfg)
158}
159
160func (ctx *BuiltinEvalContext) SetProviderConfig(
161 n string, cfg *ResourceConfig) error {
162 providerPath := make([]string, len(ctx.Path())+1)
163 copy(providerPath, ctx.Path())
164 providerPath[len(providerPath)-1] = n
165
166 // Save the configuration
167 ctx.ProviderLock.Lock()
168 ctx.ProviderConfigCache[PathCacheKey(providerPath)] = cfg
169 ctx.ProviderLock.Unlock()
170
171 return nil
172}
173
174func (ctx *BuiltinEvalContext) ProviderInput(n string) map[string]interface{} {
175 ctx.ProviderLock.Lock()
176 defer ctx.ProviderLock.Unlock()
177
178 // Make a copy of the path so we can safely edit it
179 path := ctx.Path()
180 pathCopy := make([]string, len(path)+1)
181 copy(pathCopy, path)
182
183 // Go up the tree.
184 for i := len(path) - 1; i >= 0; i-- {
185 pathCopy[i+1] = n
186 k := PathCacheKey(pathCopy[:i+2])
187 if v, ok := ctx.ProviderInputConfig[k]; ok {
188 return v
189 }
190 }
191
192 return nil
193}
194
195func (ctx *BuiltinEvalContext) SetProviderInput(n string, c map[string]interface{}) {
196 providerPath := make([]string, len(ctx.Path())+1)
197 copy(providerPath, ctx.Path())
198 providerPath[len(providerPath)-1] = n
199
200 // Save the configuration
201 ctx.ProviderLock.Lock()
202 ctx.ProviderInputConfig[PathCacheKey(providerPath)] = c
203 ctx.ProviderLock.Unlock()
204}
205
206func (ctx *BuiltinEvalContext) ParentProviderConfig(n string) *ResourceConfig {
207 ctx.ProviderLock.Lock()
208 defer ctx.ProviderLock.Unlock()
209
210 // Make a copy of the path so we can safely edit it
211 path := ctx.Path()
212 pathCopy := make([]string, len(path)+1)
213 copy(pathCopy, path)
214
215 // Go up the tree.
216 for i := len(path) - 1; i >= 0; i-- {
217 pathCopy[i+1] = n
218 k := PathCacheKey(pathCopy[:i+2])
219 if v, ok := ctx.ProviderConfigCache[k]; ok {
220 return v
221 }
222 }
223
224 return nil
225}
226
227func (ctx *BuiltinEvalContext) InitProvisioner(
228 n string) (ResourceProvisioner, error) {
229 ctx.once.Do(ctx.init)
230
231 // If we already initialized, it is an error
232 if p := ctx.Provisioner(n); p != nil {
233 return nil, fmt.Errorf("Provisioner '%s' already initialized", n)
234 }
235
236 // Warning: make sure to acquire these locks AFTER the call to Provisioner
237 // above, since it also acquires locks.
238 ctx.ProvisionerLock.Lock()
239 defer ctx.ProvisionerLock.Unlock()
240
241 provPath := make([]string, len(ctx.Path())+1)
242 copy(provPath, ctx.Path())
243 provPath[len(provPath)-1] = n
244 key := PathCacheKey(provPath)
245
246 p, err := ctx.Components.ResourceProvisioner(n, key)
247 if err != nil {
248 return nil, err
249 }
250
251 ctx.ProvisionerCache[key] = p
252 return p, nil
253}
254
255func (ctx *BuiltinEvalContext) Provisioner(n string) ResourceProvisioner {
256 ctx.once.Do(ctx.init)
257
258 ctx.ProvisionerLock.Lock()
259 defer ctx.ProvisionerLock.Unlock()
260
261 provPath := make([]string, len(ctx.Path())+1)
262 copy(provPath, ctx.Path())
263 provPath[len(provPath)-1] = n
264
265 return ctx.ProvisionerCache[PathCacheKey(provPath)]
266}
267
268func (ctx *BuiltinEvalContext) CloseProvisioner(n string) error {
269 ctx.once.Do(ctx.init)
270
271 ctx.ProvisionerLock.Lock()
272 defer ctx.ProvisionerLock.Unlock()
273
274 provPath := make([]string, len(ctx.Path())+1)
275 copy(provPath, ctx.Path())
276 provPath[len(provPath)-1] = n
277
278 var prov interface{}
279 prov = ctx.ProvisionerCache[PathCacheKey(provPath)]
280 if prov != nil {
281 if p, ok := prov.(ResourceProvisionerCloser); ok {
282 delete(ctx.ProvisionerCache, PathCacheKey(provPath))
283 return p.Close()
284 }
285 }
286
287 return nil
288}
289
290func (ctx *BuiltinEvalContext) Interpolate(
291 cfg *config.RawConfig, r *Resource) (*ResourceConfig, error) {
292 if cfg != nil {
293 scope := &InterpolationScope{
294 Path: ctx.Path(),
295 Resource: r,
296 }
297
298 vs, err := ctx.Interpolater.Values(scope, cfg.Variables)
299 if err != nil {
300 return nil, err
301 }
302
303 // Do the interpolation
304 if err := cfg.Interpolate(vs); err != nil {
305 return nil, err
306 }
307 }
308
309 result := NewResourceConfig(cfg)
310 result.interpolateForce()
311 return result, nil
312}
313
314func (ctx *BuiltinEvalContext) Path() []string {
315 return ctx.PathValue
316}
317
318func (ctx *BuiltinEvalContext) SetVariables(n string, vs map[string]interface{}) {
319 ctx.InterpolaterVarLock.Lock()
320 defer ctx.InterpolaterVarLock.Unlock()
321
322 path := make([]string, len(ctx.Path())+1)
323 copy(path, ctx.Path())
324 path[len(path)-1] = n
325 key := PathCacheKey(path)
326
327 vars := ctx.InterpolaterVars[key]
328 if vars == nil {
329 vars = make(map[string]interface{})
330 ctx.InterpolaterVars[key] = vars
331 }
332
333 for k, v := range vs {
334 vars[k] = v
335 }
336}
337
338func (ctx *BuiltinEvalContext) Diff() (*Diff, *sync.RWMutex) {
339 return ctx.DiffValue, ctx.DiffLock
340}
341
342func (ctx *BuiltinEvalContext) State() (*State, *sync.RWMutex) {
343 return ctx.StateValue, ctx.StateLock
344}
345
346func (ctx *BuiltinEvalContext) init() {
347}
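
The four-line providerPath/provPath dance above is repeated in seven of these methods: copy the module path, append the provider or provisioner name, and turn it into a cache key. A sketch of that pattern as a standalone helper — hypothetical, since the changeset inlines it, and PathCacheKey's string result is inferred from its use as a map key here:

package terraform

// cacheKey extends a module path with a component name and returns the
// key used by the ProviderCache/ProvisionerCache maps above.
func cacheKey(path []string, name string) string {
    keyPath := make([]string, len(path)+1)
    copy(keyPath, path)
    keyPath[len(keyPath)-1] = name
    return PathCacheKey(keyPath)
}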
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_context_mock.go b/vendor/github.com/hashicorp/terraform/terraform/eval_context_mock.go
new file mode 100644
index 0000000..4f90d5b
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_context_mock.go
@@ -0,0 +1,208 @@
1package terraform
2
3import (
4 "sync"
5
6 "github.com/hashicorp/terraform/config"
7)
8
9// MockEvalContext is a mock version of EvalContext that can be used
10// for tests.
11type MockEvalContext struct {
12 StoppedCalled bool
13 StoppedValue <-chan struct{}
14
15 HookCalled bool
16 HookHook Hook
17 HookError error
18
19 InputCalled bool
20 InputInput UIInput
21
22 InitProviderCalled bool
23 InitProviderName string
24 InitProviderProvider ResourceProvider
25 InitProviderError error
26
27 ProviderCalled bool
28 ProviderName string
29 ProviderProvider ResourceProvider
30
31 CloseProviderCalled bool
32 CloseProviderName string
33 CloseProviderProvider ResourceProvider
34
35 ProviderInputCalled bool
36 ProviderInputName string
37 ProviderInputConfig map[string]interface{}
38
39 SetProviderInputCalled bool
40 SetProviderInputName string
41 SetProviderInputConfig map[string]interface{}
42
43 ConfigureProviderCalled bool
44 ConfigureProviderName string
45 ConfigureProviderConfig *ResourceConfig
46 ConfigureProviderError error
47
48 SetProviderConfigCalled bool
49 SetProviderConfigName string
50 SetProviderConfigConfig *ResourceConfig
51
52 ParentProviderConfigCalled bool
53 ParentProviderConfigName string
54 ParentProviderConfigConfig *ResourceConfig
55
56 InitProvisionerCalled bool
57 InitProvisionerName string
58 InitProvisionerProvisioner ResourceProvisioner
59 InitProvisionerError error
60
61 ProvisionerCalled bool
62 ProvisionerName string
63 ProvisionerProvisioner ResourceProvisioner
64
65 CloseProvisionerCalled bool
66 CloseProvisionerName string
67 CloseProvisionerProvisioner ResourceProvisioner
68
69 InterpolateCalled bool
70 InterpolateConfig *config.RawConfig
71 InterpolateResource *Resource
72 InterpolateConfigResult *ResourceConfig
73 InterpolateError error
74
75 PathCalled bool
76 PathPath []string
77
78 SetVariablesCalled bool
79 SetVariablesModule string
80 SetVariablesVariables map[string]interface{}
81
82 DiffCalled bool
83 DiffDiff *Diff
84 DiffLock *sync.RWMutex
85
86 StateCalled bool
87 StateState *State
88 StateLock *sync.RWMutex
89}
90
91func (c *MockEvalContext) Stopped() <-chan struct{} {
92 c.StoppedCalled = true
93 return c.StoppedValue
94}
95
96func (c *MockEvalContext) Hook(fn func(Hook) (HookAction, error)) error {
97 c.HookCalled = true
98 if c.HookHook != nil {
99 if _, err := fn(c.HookHook); err != nil {
100 return err
101 }
102 }
103
104 return c.HookError
105}
106
107func (c *MockEvalContext) Input() UIInput {
108 c.InputCalled = true
109 return c.InputInput
110}
111
112func (c *MockEvalContext) InitProvider(n string) (ResourceProvider, error) {
113 c.InitProviderCalled = true
114 c.InitProviderName = n
115 return c.InitProviderProvider, c.InitProviderError
116}
117
118func (c *MockEvalContext) Provider(n string) ResourceProvider {
119 c.ProviderCalled = true
120 c.ProviderName = n
121 return c.ProviderProvider
122}
123
124func (c *MockEvalContext) CloseProvider(n string) error {
125 c.CloseProviderCalled = true
126 c.CloseProviderName = n
127 return nil
128}
129
130func (c *MockEvalContext) ConfigureProvider(n string, cfg *ResourceConfig) error {
131 c.ConfigureProviderCalled = true
132 c.ConfigureProviderName = n
133 c.ConfigureProviderConfig = cfg
134 return c.ConfigureProviderError
135}
136
137func (c *MockEvalContext) SetProviderConfig(
138 n string, cfg *ResourceConfig) error {
139 c.SetProviderConfigCalled = true
140 c.SetProviderConfigName = n
141 c.SetProviderConfigConfig = cfg
142 return nil
143}
144
145func (c *MockEvalContext) ParentProviderConfig(n string) *ResourceConfig {
146 c.ParentProviderConfigCalled = true
147 c.ParentProviderConfigName = n
148 return c.ParentProviderConfigConfig
149}
150
151func (c *MockEvalContext) ProviderInput(n string) map[string]interface{} {
152 c.ProviderInputCalled = true
153 c.ProviderInputName = n
154 return c.ProviderInputConfig
155}
156
157func (c *MockEvalContext) SetProviderInput(n string, cfg map[string]interface{}) {
158 c.SetProviderInputCalled = true
159 c.SetProviderInputName = n
160 c.SetProviderInputConfig = cfg
161}
162
163func (c *MockEvalContext) InitProvisioner(n string) (ResourceProvisioner, error) {
164 c.InitProvisionerCalled = true
165 c.InitProvisionerName = n
166 return c.InitProvisionerProvisioner, c.InitProvisionerError
167}
168
169func (c *MockEvalContext) Provisioner(n string) ResourceProvisioner {
170 c.ProvisionerCalled = true
171 c.ProvisionerName = n
172 return c.ProvisionerProvisioner
173}
174
175func (c *MockEvalContext) CloseProvisioner(n string) error {
176 c.CloseProvisionerCalled = true
177 c.CloseProvisionerName = n
178 return nil
179}
180
181func (c *MockEvalContext) Interpolate(
182 config *config.RawConfig, resource *Resource) (*ResourceConfig, error) {
183 c.InterpolateCalled = true
184 c.InterpolateConfig = config
185 c.InterpolateResource = resource
186 return c.InterpolateConfigResult, c.InterpolateError
187}
188
189func (c *MockEvalContext) Path() []string {
190 c.PathCalled = true
191 return c.PathPath
192}
193
194func (c *MockEvalContext) SetVariables(n string, vs map[string]interface{}) {
195 c.SetVariablesCalled = true
196 c.SetVariablesModule = n
197 c.SetVariablesVariables = vs
198}
199
200func (c *MockEvalContext) Diff() (*Diff, *sync.RWMutex) {
201 c.DiffCalled = true
202 return c.DiffDiff, c.DiffLock
203}
204
205func (c *MockEvalContext) State() (*State, *sync.RWMutex) {
206 c.StateCalled = true
207 return c.StateState, c.StateLock
208}
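
A sketch of the intended test pattern: pre-load the ...Result fields, run a node against the mock, then assert on the ...Called flags. EvalInterpolate is the node defined later in this diff; the test name and assertions are illustrative only.

package terraform

import "testing"

func TestEvalInterpolate_callsContext(t *testing.T) {
    ctx := &MockEvalContext{
        InterpolateConfigResult: &ResourceConfig{},
    }

    var out *ResourceConfig
    node := &EvalInterpolate{Output: &out}
    if _, err := node.Eval(ctx); err != nil {
        t.Fatalf("err: %s", err)
    }
    if !ctx.InterpolateCalled {
        t.Fatal("expected Interpolate to be called")
    }
    if out != ctx.InterpolateConfigResult {
        t.Fatal("interpolated config was not written to the output pointer")
    }
}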
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_count.go b/vendor/github.com/hashicorp/terraform/terraform/eval_count.go
new file mode 100644
index 0000000..2ae56a7
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_count.go
@@ -0,0 +1,58 @@
1package terraform
2
3import (
4 "github.com/hashicorp/terraform/config"
5)
6
7// EvalCountFixZeroOneBoundary is an EvalNode that fixes up the state
8// when there is a resource count with zero/one boundary, i.e. fixing
9// a resource named "aws_instance.foo" to "aws_instance.foo.0" and vice-versa.
10type EvalCountFixZeroOneBoundary struct {
11 Resource *config.Resource
12}
13
14// TODO: test
15func (n *EvalCountFixZeroOneBoundary) Eval(ctx EvalContext) (interface{}, error) {
16 // Get the count; it determines whether we're supposed to be
17 // adding the ".0" index suffix or trimming it off.
18 count, err := n.Resource.Count()
19 if err != nil {
20 return nil, err
21 }
22
23 // Figure out what to look for and what to replace it with
24 hunt := n.Resource.Id()
25 replace := hunt + ".0"
26 if count < 2 {
27 hunt, replace = replace, hunt
28 }
29
30 state, lock := ctx.State()
31
32 // Get a lock so we can access this instance and potentially make
33 // changes to it.
34 lock.Lock()
35 defer lock.Unlock()
36
37 // Look for the module state. If we don't have one, then it doesn't matter.
38 mod := state.ModuleByPath(ctx.Path())
39 if mod == nil {
40 return nil, nil
41 }
42
43 // Look for the resource state. If we don't have one, then it is okay.
44 rs, ok := mod.Resources[hunt]
45 if !ok {
46 return nil, nil
47 }
48
49 // If the replacement key exists, we just keep both
50 if _, ok := mod.Resources[replace]; ok {
51 return nil, nil
52 }
53
54 mod.Resources[replace] = rs
55 delete(mod.Resources, hunt)
56
57 return nil, nil
58}
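
The hunt/replace swap is the core of this node: with two or more instances the bare ID gains a ".0" index, and with zero or one it loses it. A self-contained sketch of just that decision, extracted as a hypothetical helper for illustration:

package main

import "fmt"

// renameFor mirrors the hunt/replace logic in EvalCountFixZeroOneBoundary.
func renameFor(count int, id string) (hunt, replace string) {
    hunt, replace = id, id+".0"
    if count < 2 {
        hunt, replace = replace, hunt
    }
    return hunt, replace
}

func main() {
    fmt.Println(renameFor(1, "aws_instance.foo")) // aws_instance.foo.0 -> aws_instance.foo
    fmt.Println(renameFor(3, "aws_instance.foo")) // aws_instance.foo -> aws_instance.foo.0
}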
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_count_boundary.go b/vendor/github.com/hashicorp/terraform/terraform/eval_count_boundary.go
new file mode 100644
index 0000000..91e2b90
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_count_boundary.go
@@ -0,0 +1,78 @@
1package terraform
2
3import (
4 "log"
5)
6
7// EvalCountFixZeroOneBoundaryGlobal is an EvalNode that fixes up the state
8// when there is a resource count with zero/one boundary, i.e. fixing
9// a resource named "aws_instance.foo" to "aws_instance.foo.0" and vice-versa.
10//
11// This works on the global state.
12type EvalCountFixZeroOneBoundaryGlobal struct{}
13
14// TODO: test
15func (n *EvalCountFixZeroOneBoundaryGlobal) Eval(ctx EvalContext) (interface{}, error) {
16 // Get the state and lock it since we'll potentially modify it
17 state, lock := ctx.State()
18 lock.Lock()
19 defer lock.Unlock()
20
21 // Prune the state since we require a clean state to work
22 state.prune()
23
24 // Go through each module since the boundaries are restricted to a
25 // module scope.
26 for _, m := range state.Modules {
27 if err := n.fixModule(m); err != nil {
28 return nil, err
29 }
30 }
31
32 return nil, nil
33}
34
35func (n *EvalCountFixZeroOneBoundaryGlobal) fixModule(m *ModuleState) error {
36 // Counts keeps track of keys and their counts
37 counts := make(map[string]int)
38 for k := range m.Resources {
39 // Parse the key
40 key, err := ParseResourceStateKey(k)
41 if err != nil {
42 return err
43 }
44
45 // Set the index to -1 so that we can keep count
46 key.Index = -1
47
48 // Increment
49 counts[key.String()]++
50 }
51
52 // Go through the counts and do the fixup for each resource
53 for raw, count := range counts {
54 // Search and replace this resource
55 search := raw
56 replace := raw + ".0"
57 if count < 2 {
58 search, replace = replace, search
59 }
60 log.Printf("[TRACE] EvalCountFixZeroOneBoundaryGlobal: count %d, search %q, replace %q", count, search, replace)
61
62 // Look for the resource state. If we don't have one, then it is okay.
63 rs, ok := m.Resources[search]
64 if !ok {
65 continue
66 }
67
68 // If the replacement key exists, we just keep both
69 if _, ok := m.Resources[replace]; ok {
70 continue
71 }
72
73 m.Resources[replace] = rs
74 delete(m.Resources, search)
75 }
76
77 return nil
78}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_count_computed.go b/vendor/github.com/hashicorp/terraform/terraform/eval_count_computed.go
new file mode 100644
index 0000000..54a8333
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_count_computed.go
@@ -0,0 +1,25 @@
1package terraform
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/terraform/config"
7)
8
9// EvalCountCheckComputed is an EvalNode that checks if a resource count
10// is computed and errors if so. A computed count can occur across a
11// module boundary, which isn't supported yet.
12type EvalCountCheckComputed struct {
13 Resource *config.Resource
14}
15
16// TODO: test
17func (n *EvalCountCheckComputed) Eval(ctx EvalContext) (interface{}, error) {
18 if n.Resource.RawCount.Value() == unknownValue() {
19 return nil, fmt.Errorf(
20 "%s: value of 'count' cannot be computed",
21 n.Resource.Id())
22 }
23
24 return nil, nil
25}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_diff.go b/vendor/github.com/hashicorp/terraform/terraform/eval_diff.go
new file mode 100644
index 0000000..6f09526
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_diff.go
@@ -0,0 +1,478 @@
1package terraform
2
3import (
4 "fmt"
5 "log"
6 "strings"
7
8 "github.com/hashicorp/terraform/config"
9)
10
11// EvalCompareDiff is an EvalNode implementation that compares two diffs
12// and errors if the diffs are not equal.
13type EvalCompareDiff struct {
14 Info *InstanceInfo
15 One, Two **InstanceDiff
16}
17
18// TODO: test
19func (n *EvalCompareDiff) Eval(ctx EvalContext) (interface{}, error) {
20 one, two := *n.One, *n.Two
21
22 // If either are nil, let them be empty
23 if one == nil {
24 one = new(InstanceDiff)
25 one.init()
26 }
27 if two == nil {
28 two = new(InstanceDiff)
29 two.init()
30 }
31 oneId, _ := one.GetAttribute("id")
32 twoId, _ := two.GetAttribute("id")
33 one.DelAttribute("id")
34 two.DelAttribute("id")
35 defer func() {
36 if oneId != nil {
37 one.SetAttribute("id", oneId)
38 }
39 if twoId != nil {
40 two.SetAttribute("id", twoId)
41 }
42 }()
43
44 if same, reason := one.Same(two); !same {
45 log.Printf("[ERROR] %s: diffs didn't match", n.Info.Id)
46 log.Printf("[ERROR] %s: reason: %s", n.Info.Id, reason)
47 log.Printf("[ERROR] %s: diff one: %#v", n.Info.Id, one)
48 log.Printf("[ERROR] %s: diff two: %#v", n.Info.Id, two)
49 return nil, fmt.Errorf(
50 "%s: diffs didn't match during apply. This is a bug with "+
51 "Terraform and should be reported as a GitHub Issue.\n"+
52 "\n"+
53 "Please include the following information in your report:\n"+
54 "\n"+
55 " Terraform Version: %s\n"+
56 " Resource ID: %s\n"+
57 " Mismatch reason: %s\n"+
58 " Diff One (usually from plan): %#v\n"+
59 " Diff Two (usually from apply): %#v\n"+
60 "\n"+
61 "Also include as much context as you can about your config, state, "+
62 "and the steps you performed to trigger this error.\n",
63 n.Info.Id, Version, n.Info.Id, reason, one, two)
64 }
65
66 return nil, nil
67}
68
69// EvalDiff is an EvalNode implementation that computes the diff
70// for a resource.
71type EvalDiff struct {
72 Name string
73 Info *InstanceInfo
74 Config **ResourceConfig
75 Provider *ResourceProvider
76 Diff **InstanceDiff
77 State **InstanceState
78 OutputDiff **InstanceDiff
79 OutputState **InstanceState
80
81 // Resource is needed to fetch the ignore_changes list so we can
82 // filter user-requested ignored attributes from the diff.
83 Resource *config.Resource
84}
85
86// TODO: test
87func (n *EvalDiff) Eval(ctx EvalContext) (interface{}, error) {
88 state := *n.State
89 config := *n.Config
90 provider := *n.Provider
91
92 // Call pre-diff hook
93 err := ctx.Hook(func(h Hook) (HookAction, error) {
94 return h.PreDiff(n.Info, state)
95 })
96 if err != nil {
97 return nil, err
98 }
99
100 // The state for the diff must never be nil
101 diffState := state
102 if diffState == nil {
103 diffState = new(InstanceState)
104 }
105 diffState.init()
106
107 // Diff!
108 diff, err := provider.Diff(n.Info, diffState, config)
109 if err != nil {
110 return nil, err
111 }
112 if diff == nil {
113 diff = new(InstanceDiff)
114 }
115
116 // Set DestroyDeposed if we have deposed instances
117 _, err = readInstanceFromState(ctx, n.Name, nil, func(rs *ResourceState) (*InstanceState, error) {
118 if len(rs.Deposed) > 0 {
119 diff.DestroyDeposed = true
120 }
121
122 return nil, nil
123 })
124 if err != nil {
125 return nil, err
126 }
127
128 // Preserve the DestroyTainted flag
129 if n.Diff != nil {
130 diff.SetTainted((*n.Diff).GetDestroyTainted())
131 }
132
133 // Require a destroy if there is an ID and it requires new.
134 if diff.RequiresNew() && state != nil && state.ID != "" {
135 diff.SetDestroy(true)
136 }
137
138 // If we're creating a new resource, compute its ID
139 if diff.RequiresNew() || state == nil || state.ID == "" {
140 var oldID string
141 if state != nil {
142 oldID = state.Attributes["id"]
143 }
144
145 // Add diff to compute new ID
146 diff.init()
147 diff.SetAttribute("id", &ResourceAttrDiff{
148 Old: oldID,
149 NewComputed: true,
150 RequiresNew: true,
151 Type: DiffAttrOutput,
152 })
153 }
154
155 // filter out attributes matched by ignore_changes
156 if err := n.processIgnoreChanges(diff); err != nil {
157 return nil, err
158 }
159
160 // Call post-diff hook
161 err = ctx.Hook(func(h Hook) (HookAction, error) {
162 return h.PostDiff(n.Info, diff)
163 })
164 if err != nil {
165 return nil, err
166 }
167
168 // Update our output
169 *n.OutputDiff = diff
170
171 // Update the state if we care
172 if n.OutputState != nil {
173 *n.OutputState = state
174
175 // Merge our state so that the state is updated with our plan
176 if !diff.Empty() {
177 *n.OutputState = state.MergeDiff(diff)
178 }
179 }
180
181 return nil, nil
182}
183
184func (n *EvalDiff) processIgnoreChanges(diff *InstanceDiff) error {
185 if diff == nil || n.Resource == nil || n.Resource.Id() == "" {
186 return nil
187 }
188 ignoreChanges := n.Resource.Lifecycle.IgnoreChanges
189
190 if len(ignoreChanges) == 0 {
191 return nil
192 }
193
194 // If we're just creating the resource, we shouldn't alter the
195 // Diff at all
196 if diff.ChangeType() == DiffCreate {
197 return nil
198 }
199
200 // If the resource has been tainted then we don't process ignore changes
201 // since we MUST recreate the entire resource.
202 if diff.GetDestroyTainted() {
203 return nil
204 }
205
206 attrs := diff.CopyAttributes()
207
208 // get the complete set of keys we want to ignore
209 ignorableAttrKeys := make(map[string]bool)
210 for _, ignoredKey := range ignoreChanges {
211 for k := range attrs {
212 if ignoredKey == "*" || strings.HasPrefix(k, ignoredKey) {
213 ignorableAttrKeys[k] = true
214 }
215 }
216 }
217
218 // If the resource was being destroyed, check to see if we can ignore the
219 // reason for it being destroyed.
220 if diff.GetDestroy() {
221 for k, v := range attrs {
222 if k == "id" {
223 // id will always be changed if we intended to replace this instance
224 continue
225 }
226 if v.Empty() || v.NewComputed {
227 continue
228 }
229
230 // If any RequiresNew attribute isn't ignored, we need to keep the diff
231 // as-is to be able to replace the resource.
232 if v.RequiresNew && !ignorableAttrKeys[k] {
233 return nil
234 }
235 }
236
237 // Now that we know that we aren't replacing the instance, we can filter
238 // out all the empty and computed attributes. There may be a bunch of
239 // extraneous attribute diffs for the other non-requires-new attributes
240 // going from "" -> "configval" or "" -> "<computed>".
241 // We must make sure any flatmapped containers are filtered (or not) as a
242 // whole.
243 containers := groupContainers(diff)
244 keep := map[string]bool{}
245 for _, v := range containers {
246 if v.keepDiff() {
247 // At least one key has changes, so list all the sibling keys
248 // to keep in the diff.
249 for k := range v {
250 keep[k] = true
251 }
252 }
253 }
254
255 for k, v := range attrs {
256 if (v.Empty() || v.NewComputed) && !keep[k] {
257 ignorableAttrKeys[k] = true
258 }
259 }
260 }
261
262 // Here we undo the two reactions to RequiresNew in EvalDiff: the "id"
263 // attribute diff and the Destroy boolean field
264 log.Printf("[DEBUG] Removing 'id' diff and setting Destroy to false " +
265 "because after ignore_changes, this diff no longer requires replacement")
266 diff.DelAttribute("id")
267 diff.SetDestroy(false)
268
269 // If we didn't hit any of our early exit conditions, we can filter the diff.
270 for k := range ignorableAttrKeys {
271 log.Printf("[DEBUG] [EvalIgnoreChanges] %s - Ignoring diff attribute: %s",
272 n.Resource.Id(), k)
273 diff.DelAttribute(k)
274 }
275
276 return nil
277}
278
279// a group of key-*ResourceAttrDiff pairs from the same flatmapped container
280type flatAttrDiff map[string]*ResourceAttrDiff
281
282// we need to keep all keys if any of them have a diff
283func (f flatAttrDiff) keepDiff() bool {
284 for _, v := range f {
285 if !v.Empty() && !v.NewComputed {
286 return true
287 }
288 }
289 return false
290}
291
292// sets, lists and maps need to be compared for diff inclusion as a whole, so
293// group the flatmapped keys together for easier comparison.
294func groupContainers(d *InstanceDiff) map[string]flatAttrDiff {
295 isIndex := multiVal.MatchString
296 containers := map[string]flatAttrDiff{}
297 attrs := d.CopyAttributes()
298 // we need to loop once to find the index key
299 for k := range attrs {
300 if isIndex(k) {
301 // add the key, always including the final dot to fully qualify it
302 containers[k[:len(k)-1]] = flatAttrDiff{}
303 }
304 }
305
306 // loop again to find all the sub keys
307 for prefix, values := range containers {
308 for k, attrDiff := range attrs {
309 // we include the index value as well, since it could be part of the diff
310 if strings.HasPrefix(k, prefix) {
311 values[k] = attrDiff
312 }
313 }
314 }
315
316 return containers
317}
318
319// EvalDiffDestroy is an EvalNode implementation that returns a plain
320// destroy diff.
321type EvalDiffDestroy struct {
322 Info *InstanceInfo
323 State **InstanceState
324 Output **InstanceDiff
325}
326
327// TODO: test
328func (n *EvalDiffDestroy) Eval(ctx EvalContext) (interface{}, error) {
329 state := *n.State
330
331 // If there is no state or we don't have an ID, we're already destroyed
332 if state == nil || state.ID == "" {
333 return nil, nil
334 }
335
336 // Call pre-diff hook
337 err := ctx.Hook(func(h Hook) (HookAction, error) {
338 return h.PreDiff(n.Info, state)
339 })
340 if err != nil {
341 return nil, err
342 }
343
344 // The diff
345 diff := &InstanceDiff{Destroy: true}
346
347 // Call post-diff hook
348 err = ctx.Hook(func(h Hook) (HookAction, error) {
349 return h.PostDiff(n.Info, diff)
350 })
351 if err != nil {
352 return nil, err
353 }
354
355 // Update our output
356 *n.Output = diff
357
358 return nil, nil
359}
360
361// EvalDiffDestroyModule is an EvalNode implementation that writes the diff to
362// the full diff.
363type EvalDiffDestroyModule struct {
364 Path []string
365}
366
367// TODO: test
368func (n *EvalDiffDestroyModule) Eval(ctx EvalContext) (interface{}, error) {
369 diff, lock := ctx.Diff()
370
371 // Acquire the lock so that we can do this safely concurrently
372 lock.Lock()
373 defer lock.Unlock()
374
375 // Write the diff
376 modDiff := diff.ModuleByPath(n.Path)
377 if modDiff == nil {
378 modDiff = diff.AddModule(n.Path)
379 }
380 modDiff.Destroy = true
381
382 return nil, nil
383}
384
385// EvalFilterDiff is an EvalNode implementation that filters the diff
386// according to some filter.
387type EvalFilterDiff struct {
388 // Input and output
389 Diff **InstanceDiff
390 Output **InstanceDiff
391
392 // Destroy, if true, keeps only the destroy flag from the input diff.
393 Destroy bool
394}
395
396func (n *EvalFilterDiff) Eval(ctx EvalContext) (interface{}, error) {
397 if *n.Diff == nil {
398 return nil, nil
399 }
400
401 input := *n.Diff
402 result := new(InstanceDiff)
403
404 if n.Destroy {
405 if input.GetDestroy() || input.RequiresNew() {
406 result.SetDestroy(true)
407 }
408 }
409
410 if n.Output != nil {
411 *n.Output = result
412 }
413
414 return nil, nil
415}
416
417// EvalReadDiff is an EvalNode implementation that writes the diff to
418// the full diff.
419type EvalReadDiff struct {
420 Name string
421 Diff **InstanceDiff
422}
423
424func (n *EvalReadDiff) Eval(ctx EvalContext) (interface{}, error) {
425 diff, lock := ctx.Diff()
426
427 // Acquire the lock so that we can do this safely concurrently
428 lock.Lock()
429 defer lock.Unlock()
430
431 // Write the diff
432 modDiff := diff.ModuleByPath(ctx.Path())
433 if modDiff == nil {
434 return nil, nil
435 }
436
437 *n.Diff = modDiff.Resources[n.Name]
438
439 return nil, nil
440}
441
442// EvalWriteDiff is an EvalNode implementation that writes the diff to
443// the full diff.
444type EvalWriteDiff struct {
445 Name string
446 Diff **InstanceDiff
447}
448
449// TODO: test
450func (n *EvalWriteDiff) Eval(ctx EvalContext) (interface{}, error) {
451 diff, lock := ctx.Diff()
452
453 // The diff to write; if it's empty we write nil instead
454 var diffVal *InstanceDiff
455 if n.Diff != nil {
456 diffVal = *n.Diff
457 }
458 if diffVal.Empty() {
459 diffVal = nil
460 }
461
462 // Acquire the lock so that we can do this safely concurrently
463 lock.Lock()
464 defer lock.Unlock()
465
466 // Write the diff
467 modDiff := diff.ModuleByPath(ctx.Path())
468 if modDiff == nil {
469 modDiff = diff.AddModule(ctx.Path())
470 }
471 if diffVal != nil {
472 modDiff.Resources[n.Name] = diffVal
473 } else {
474 delete(modDiff.Resources, n.Name)
475 }
476
477 return nil, nil
478}
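
To make the container grouping concrete: flatmapped collections carry an index key (assuming multiVal matches the ".#" list-count and ".%" map-count suffixes, which is how it is used above), and every key sharing the container's dotted prefix is grouped with it. A sketch, not part of this changeset:

package terraform

// exampleGroupContainers shows how a flatmapped "tags" map groups.
func exampleGroupContainers() map[string]flatAttrDiff {
    d := &InstanceDiff{
        Attributes: map[string]*ResourceAttrDiff{
            "tags.%":    {Old: "1", New: "2"},
            "tags.Name": {Old: "web", New: "web"},
            "tags.Env":  {Old: "", New: "prod"},
        },
    }
    // Yields {"tags.": {"tags.%", "tags.Name", "tags.Env"}}. keepDiff is
    // true because "tags.Env" carries a real change, so all sibling keys
    // stay in the diff together.
    return groupContainers(d)
}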
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_error.go b/vendor/github.com/hashicorp/terraform/terraform/eval_error.go
new file mode 100644
index 0000000..470f798
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_error.go
@@ -0,0 +1,20 @@
1package terraform
2
3// EvalReturnError is an EvalNode implementation that returns an
4// error if it is present.
5//
6// This is useful for scenarios where an error has been captured by
7// another EvalNode (like EvalApply) for special EvalTree-based error
8// handling, and that handling has completed, so the error should be
9// returned normally.
10type EvalReturnError struct {
11 Error *error
12}
13
14func (n *EvalReturnError) Eval(ctx EvalContext) (interface{}, error) {
15 if n.Error == nil {
16 return nil, nil
17 }
18
19 return nil, *n.Error
20}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_filter.go b/vendor/github.com/hashicorp/terraform/terraform/eval_filter.go
new file mode 100644
index 0000000..711c625
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_filter.go
@@ -0,0 +1,25 @@
1package terraform
2
3// EvalNodeFilterFunc is the callback used to replace a node with
4// another node. To not do the replacement, just return the input node.
5type EvalNodeFilterFunc func(EvalNode) EvalNode
6
7// EvalNodeFilterable is an interface that can be implemented by
8// EvalNodes to allow filtering of sub-elements. Note that this isn't
9// a common thing to implement and you probably don't need it.
10type EvalNodeFilterable interface {
11 EvalNode
12 Filter(EvalNodeFilterFunc)
13}
14
15// EvalFilter runs the filter on the given node and returns the
16// final filtered value. This should be called rather than checking
17// the EvalNode directly since this will properly handle EvalNodeFilterables.
18func EvalFilter(node EvalNode, fn EvalNodeFilterFunc) EvalNode {
19 if f, ok := node.(EvalNodeFilterable); ok {
20 f.Filter(fn)
21 return node
22 }
23
24 return fn(node)
25}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_filter_operation.go b/vendor/github.com/hashicorp/terraform/terraform/eval_filter_operation.go
new file mode 100644
index 0000000..1a55f02
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_filter_operation.go
@@ -0,0 +1,49 @@
1package terraform
2
3// EvalNodeOpFilterable is an interface that EvalNodes can implement
4// to be filterable by the operation that is being run on Terraform.
5type EvalNodeOpFilterable interface {
6 IncludeInOp(walkOperation) bool
7}
8
9// EvalNodeFilterOp returns a filter function that filters nodes that
10// include themselves in specific operations.
11func EvalNodeFilterOp(op walkOperation) EvalNodeFilterFunc {
12 return func(n EvalNode) EvalNode {
13 include := true
14 if of, ok := n.(EvalNodeOpFilterable); ok {
15 include = of.IncludeInOp(op)
16 }
17 if include {
18 return n
19 }
20
21 return EvalNoop{}
22 }
23}
24
25// EvalOpFilter is an EvalNode implementation that is a proxy to
26// another node but filters based on the operation.
27type EvalOpFilter struct {
28 // Ops is the list of operations to include this node in.
29 Ops []walkOperation
30
31 // Node is the node to execute
32 Node EvalNode
33}
34
35// TODO: test
36func (n *EvalOpFilter) Eval(ctx EvalContext) (interface{}, error) {
37 return EvalRaw(n.Node, ctx)
38}
39
40// EvalNodeOpFilterable impl.
41func (n *EvalOpFilter) IncludeInOp(op walkOperation) bool {
42 for _, v := range n.Ops {
43 if v == op {
44 return true
45 }
46 }
47
48 return false
49}
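
Putting the two pieces together: a node wrapped in EvalOpFilter survives the walks listed in Ops and degrades to EvalNoop everywhere else. A sketch, where walkPlan and walkRefresh are assumed values of the walkOperation enum referenced above:

package terraform

func exampleOpFilter(info *InstanceInfo) (planned, refreshed EvalNode) {
    node := &EvalOpFilter{
        Ops:  []walkOperation{walkPlan},
        Node: &EvalInstanceInfo{Info: info},
    }

    planned = EvalFilter(node, EvalNodeFilterOp(walkPlan))      // kept as-is
    refreshed = EvalFilter(node, EvalNodeFilterOp(walkRefresh)) // EvalNoop{}
    return planned, refreshed
}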
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_if.go b/vendor/github.com/hashicorp/terraform/terraform/eval_if.go
new file mode 100644
index 0000000..d6b46a1
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_if.go
@@ -0,0 +1,26 @@
1package terraform
2
3// EvalIf is an EvalNode that is a conditional.
4type EvalIf struct {
5 If func(EvalContext) (bool, error)
6 Then EvalNode
7 Else EvalNode
8}
9
10// TODO: test
11func (n *EvalIf) Eval(ctx EvalContext) (interface{}, error) {
12 yes, err := n.If(ctx)
13 if err != nil {
14 return nil, err
15 }
16
17 if yes {
18 return EvalRaw(n.Then, ctx)
19 }
20
21 if n.Else != nil {
22 return EvalRaw(n.Else, ctx)
23 }
24
25 return nil, nil
26}
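
A sketch of how EvalIf typically guards another node, here the EvalDiffDestroy node defined earlier in this diff; the wiring is illustrative, since the real graph builders supply more context:

package terraform

func exampleEvalIf(info *InstanceInfo, state **InstanceState, out **InstanceDiff) EvalNode {
    return &EvalIf{
        If: func(EvalContext) (bool, error) {
            s := *state
            return s != nil && s.ID != "", nil
        },
        Then: &EvalDiffDestroy{Info: info, State: state, Output: out},
        Else: EvalNoop{},
    }
}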
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_import_state.go b/vendor/github.com/hashicorp/terraform/terraform/eval_import_state.go
new file mode 100644
index 0000000..62cc581
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_import_state.go
@@ -0,0 +1,76 @@
1package terraform
2
3import (
4 "fmt"
5)
6
7// EvalImportState is an EvalNode implementation that performs an
8// ImportState operation on a provider. This will return the imported
9// states but won't modify any actual state.
10type EvalImportState struct {
11 Provider *ResourceProvider
12 Info *InstanceInfo
13 Id string
14 Output *[]*InstanceState
15}
16
17// TODO: test
18func (n *EvalImportState) Eval(ctx EvalContext) (interface{}, error) {
19 provider := *n.Provider
20
21 {
22 // Call pre-import hook
23 err := ctx.Hook(func(h Hook) (HookAction, error) {
24 return h.PreImportState(n.Info, n.Id)
25 })
26 if err != nil {
27 return nil, err
28 }
29 }
30
31 // Import!
32 state, err := provider.ImportState(n.Info, n.Id)
33 if err != nil {
34 return nil, fmt.Errorf(
35 "import %s (id: %s): %s", n.Info.HumanId(), n.Id, err)
36 }
37
38 if n.Output != nil {
39 *n.Output = state
40 }
41
42 {
43 // Call post-import hook
44 err := ctx.Hook(func(h Hook) (HookAction, error) {
45 return h.PostImportState(n.Info, state)
46 })
47 if err != nil {
48 return nil, err
49 }
50 }
51
52 return nil, nil
53}
54
55// EvalImportStateVerify verifies the state after ImportState and
56// after the refresh to make sure it is non-nil and valid.
57type EvalImportStateVerify struct {
58 Info *InstanceInfo
59 Id string
60 State **InstanceState
61}
62
63// TODO: test
64func (n *EvalImportStateVerify) Eval(ctx EvalContext) (interface{}, error) {
65 state := *n.State
66 if state.Empty() {
67 return nil, fmt.Errorf(
68 "import %s (id: %s): Terraform detected a resource with this ID doesn't\n"+
69 "exist. Please verify the ID is correct. You cannot import non-existent\n"+
70 "resources using Terraform import.",
71 n.Info.HumanId(),
72 n.Id)
73 }
74
75 return nil, nil
76}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_interpolate.go b/vendor/github.com/hashicorp/terraform/terraform/eval_interpolate.go
new file mode 100644
index 0000000..6825ff5
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_interpolate.go
@@ -0,0 +1,24 @@
1package terraform
2
3import "github.com/hashicorp/terraform/config"
4
5// EvalInterpolate is an EvalNode implementation that takes a raw
6// configuration and interpolates it.
7type EvalInterpolate struct {
8 Config *config.RawConfig
9 Resource *Resource
10 Output **ResourceConfig
11}
12
13func (n *EvalInterpolate) Eval(ctx EvalContext) (interface{}, error) {
14 rc, err := ctx.Interpolate(n.Config, n.Resource)
15 if err != nil {
16 return nil, err
17 }
18
19 if n.Output != nil {
20 *n.Output = rc
21 }
22
23 return nil, nil
24}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_noop.go b/vendor/github.com/hashicorp/terraform/terraform/eval_noop.go
new file mode 100644
index 0000000..f4bc822
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_noop.go
@@ -0,0 +1,8 @@
1package terraform
2
3// EvalNoop is an EvalNode that does nothing.
4type EvalNoop struct{}
5
6func (EvalNoop) Eval(EvalContext) (interface{}, error) {
7 return nil, nil
8}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_output.go b/vendor/github.com/hashicorp/terraform/terraform/eval_output.go
new file mode 100644
index 0000000..cf61781
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_output.go
@@ -0,0 +1,119 @@
1package terraform
2
3import (
4 "fmt"
5 "log"
6
7 "github.com/hashicorp/terraform/config"
8)
9
10// EvalDeleteOutput is an EvalNode implementation that deletes an output
11// from the state.
12type EvalDeleteOutput struct {
13 Name string
14}
15
16// TODO: test
17func (n *EvalDeleteOutput) Eval(ctx EvalContext) (interface{}, error) {
18 state, lock := ctx.State()
19 if state == nil {
20 return nil, nil
21 }
22
23 // Get a write lock so we can access this instance
24 lock.Lock()
25 defer lock.Unlock()
26
27 // Look for the module state. If we don't have one, there's nothing to delete.
28 mod := state.ModuleByPath(ctx.Path())
29 if mod == nil {
30 return nil, nil
31 }
32
33 delete(mod.Outputs, n.Name)
34
35 return nil, nil
36}
37
38// EvalWriteOutput is an EvalNode implementation that writes the output
39// for the given name to the current state.
40type EvalWriteOutput struct {
41 Name string
42 Sensitive bool
43 Value *config.RawConfig
44}
45
46// TODO: test
47func (n *EvalWriteOutput) Eval(ctx EvalContext) (interface{}, error) {
48 cfg, err := ctx.Interpolate(n.Value, nil)
49 if err != nil {
50 // Log error but continue anyway
51 log.Printf("[WARN] Output interpolation %q failed: %s", n.Name, err)
52 }
53
54 state, lock := ctx.State()
55 if state == nil {
56 return nil, fmt.Errorf("cannot write output to nil state")
57 }
58
59 // Get a write lock so we can access this instance
60 lock.Lock()
61 defer lock.Unlock()
62
63 // Look for the module state. If we don't have one, create it.
64 mod := state.ModuleByPath(ctx.Path())
65 if mod == nil {
66 mod = state.AddModule(ctx.Path())
67 }
68
69 // Get the value from the config
70 var valueRaw interface{} = config.UnknownVariableValue
71 if cfg != nil {
72 var ok bool
73 valueRaw, ok = cfg.Get("value")
74 if !ok {
75 valueRaw = ""
76 }
77 if cfg.IsComputed("value") {
78 valueRaw = config.UnknownVariableValue
79 }
80 }
81
82 switch valueTyped := valueRaw.(type) {
83 case string:
84 mod.Outputs[n.Name] = &OutputState{
85 Type: "string",
86 Sensitive: n.Sensitive,
87 Value: valueTyped,
88 }
89 case []interface{}:
90 mod.Outputs[n.Name] = &OutputState{
91 Type: "list",
92 Sensitive: n.Sensitive,
93 Value: valueTyped,
94 }
95 case map[string]interface{}:
96 mod.Outputs[n.Name] = &OutputState{
97 Type: "map",
98 Sensitive: n.Sensitive,
99 Value: valueTyped,
100 }
101 case []map[string]interface{}:
102 // an HCL map is multi-valued, so if this was read out of a config the
103 // map may still be in a slice.
104 if len(valueTyped) == 1 {
105 mod.Outputs[n.Name] = &OutputState{
106 Type: "map",
107 Sensitive: n.Sensitive,
108 Value: valueTyped[0],
109 }
110 break
111 }
112 return nil, fmt.Errorf("output %s type (%T) with %d values not valid for type map",
113 n.Name, valueTyped, len(valueTyped))
114 default:
115 return nil, fmt.Errorf("output %s is not a valid type (%T)", n.Name, valueTyped)
116 }
117
118 return nil, nil
119}
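
The []map[string]interface{} arm above exists because HCL decodes a map assignment as a one-element slice of maps. A sketch of what ends up in the state for such an output, as a hypothetical helper mirroring that arm:

package terraform

func exampleOutputUnwrap() *OutputState {
    valueRaw := []map[string]interface{}{
        {"region": "us-east-1"},
    }
    if len(valueRaw) == 1 {
        // Unwrapped and stored as a plain map output.
        return &OutputState{Type: "map", Value: valueRaw[0]}
    }
    return nil // multiple elements are rejected with an error above
}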
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_provider.go b/vendor/github.com/hashicorp/terraform/terraform/eval_provider.go
new file mode 100644
index 0000000..092fd18
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_provider.go
@@ -0,0 +1,164 @@
1package terraform
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/terraform/config"
7)
8
9// EvalSetProviderConfig sets the parent configuration for a provider
10// without configuring that provider, validating it, etc.
11type EvalSetProviderConfig struct {
12 Provider string
13 Config **ResourceConfig
14}
15
16func (n *EvalSetProviderConfig) Eval(ctx EvalContext) (interface{}, error) {
17 return nil, ctx.SetProviderConfig(n.Provider, *n.Config)
18}
19
20// EvalBuildProviderConfig outputs a *ResourceConfig that is properly
21// merged with parents and inputs on top of what is configured in the file.
22type EvalBuildProviderConfig struct {
23 Provider string
24 Config **ResourceConfig
25 Output **ResourceConfig
26}
27
28func (n *EvalBuildProviderConfig) Eval(ctx EvalContext) (interface{}, error) {
29 cfg := *n.Config
30
31 // If we have a configuration set, then merge that in
32 if input := ctx.ProviderInput(n.Provider); input != nil {
33 // "input" is a map of the subset of config values that were known
34 // during the input walk, set by EvalInputProvider. Note that
35 // in particular it does *not* include attributes that had
36 // computed values at input time; those appear *only* in
37 // "cfg" here.
38 rc, err := config.NewRawConfig(input)
39 if err != nil {
40 return nil, err
41 }
42
43 merged := cfg.raw.Merge(rc)
44 cfg = NewResourceConfig(merged)
45 }
46
47 // Get the parent configuration if there is one
48 if parent := ctx.ParentProviderConfig(n.Provider); parent != nil {
49 merged := cfg.raw.Merge(parent.raw)
50 cfg = NewResourceConfig(merged)
51 }
52
53 *n.Output = cfg
54 return nil, nil
55}
56
57// EvalConfigProvider is an EvalNode implementation that configures
58// a provider that is already initialized and retrieved.
59type EvalConfigProvider struct {
60 Provider string
61 Config **ResourceConfig
62}
63
64func (n *EvalConfigProvider) Eval(ctx EvalContext) (interface{}, error) {
65 return nil, ctx.ConfigureProvider(n.Provider, *n.Config)
66}
67
68// EvalInitProvider is an EvalNode implementation that initializes a provider
69// and returns nothing. The provider can be retrieved again with the
70// EvalGetProvider node.
71type EvalInitProvider struct {
72 Name string
73}
74
75func (n *EvalInitProvider) Eval(ctx EvalContext) (interface{}, error) {
76 return ctx.InitProvider(n.Name)
77}
78
79// EvalCloseProvider is an EvalNode implementation that closes provider
80// connections that aren't needed anymore.
81type EvalCloseProvider struct {
82 Name string
83}
84
85func (n *EvalCloseProvider) Eval(ctx EvalContext) (interface{}, error) {
86 ctx.CloseProvider(n.Name)
87 return nil, nil
88}
89
90// EvalGetProvider is an EvalNode implementation that retrieves an already
91// initialized provider instance for the given name.
92type EvalGetProvider struct {
93 Name string
94 Output *ResourceProvider
95}
96
97func (n *EvalGetProvider) Eval(ctx EvalContext) (interface{}, error) {
98 result := ctx.Provider(n.Name)
99 if result == nil {
100 return nil, fmt.Errorf("provider %s not initialized", n.Name)
101 }
102
103 if n.Output != nil {
104 *n.Output = result
105 }
106
107 return nil, nil
108}
109
110// EvalInputProvider is an EvalNode implementation that asks for input
111// for the given provider configurations.
112type EvalInputProvider struct {
113 Name string
114 Provider *ResourceProvider
115 Config **ResourceConfig
116}
117
118func (n *EvalInputProvider) Eval(ctx EvalContext) (interface{}, error) {
119 // If we already configured this provider, then don't do this again
120 if v := ctx.ProviderInput(n.Name); v != nil {
121 return nil, nil
122 }
123
124 rc := *n.Config
125
126 // Wrap the input into a namespace
127 input := &PrefixUIInput{
128 IdPrefix: fmt.Sprintf("provider.%s", n.Name),
129 QueryPrefix: fmt.Sprintf("provider.%s.", n.Name),
130 UIInput: ctx.Input(),
131 }
132
133 // Go through each provider and capture the input necessary
134 // to satisfy it.
135 config, err := (*n.Provider).Input(input, rc)
136 if err != nil {
137 return nil, fmt.Errorf(
138 "Error configuring %s: %s", n.Name, err)
139 }
140
141 // Set the input that we received so that child modules don't attempt
142 // to ask for input again.
143 if config != nil && len(config.Config) > 0 {
144 // This repository of provider input results on the context doesn't
145 // retain config.ComputedKeys, so we need to filter those out here
146 // in order that later users of this data won't try to use the unknown
147 // value placeholder as if it were a literal value. This map is just
148 // of known values we've been able to complete so far; dynamic stuff
149 // will be merged in by EvalBuildProviderConfig on subsequent
150 // (post-input) walks.
151 confMap := config.Config
152 if config.ComputedKeys != nil {
153 for _, key := range config.ComputedKeys {
154 delete(confMap, key)
155 }
156 }
157
158 ctx.SetProviderInput(n.Name, confMap)
159 } else {
160 ctx.SetProviderInput(n.Name, map[string]interface{}{})
161 }
162
163 return nil, nil
164}
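
These provider nodes are designed to be chained; a sketch of the usual ordering inside an EvalSequence (defined in eval_sequence.go later in this diff), with the config plumbing illustrative only:

package terraform

func exampleProviderNodes(cfg **ResourceConfig) EvalNode {
    var provider ResourceProvider
    var merged *ResourceConfig
    return &EvalSequence{
        Nodes: []EvalNode{
            &EvalInitProvider{Name: "aws"},
            &EvalGetProvider{Name: "aws", Output: &provider},
            &EvalBuildProviderConfig{Provider: "aws", Config: cfg, Output: &merged},
            &EvalConfigProvider{Provider: "aws", Config: &merged},
        },
    }
}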
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_provisioner.go b/vendor/github.com/hashicorp/terraform/terraform/eval_provisioner.go
new file mode 100644
index 0000000..89579c0
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_provisioner.go
@@ -0,0 +1,47 @@
1package terraform
2
3import (
4 "fmt"
5)
6
7// EvalInitProvisioner is an EvalNode implementation that initializes a provisioner
8// and returns nothing. The provisioner can be retrieved again with the
9// EvalGetProvisioner node.
10type EvalInitProvisioner struct {
11 Name string
12}
13
14func (n *EvalInitProvisioner) Eval(ctx EvalContext) (interface{}, error) {
15 return ctx.InitProvisioner(n.Name)
16}
17
18// EvalCloseProvisioner is an EvalNode implementation that closes provisioner
19// connections that aren't needed anymore.
20type EvalCloseProvisioner struct {
21 Name string
22}
23
24func (n *EvalCloseProvisioner) Eval(ctx EvalContext) (interface{}, error) {
25 ctx.CloseProvisioner(n.Name)
26 return nil, nil
27}
28
29// EvalGetProvisioner is an EvalNode implementation that retrieves an already
30// initialized provisioner instance for the given name.
31type EvalGetProvisioner struct {
32 Name string
33 Output *ResourceProvisioner
34}
35
36func (n *EvalGetProvisioner) Eval(ctx EvalContext) (interface{}, error) {
37 result := ctx.Provisioner(n.Name)
38 if result == nil {
39 return nil, fmt.Errorf("provisioner %s not initialized", n.Name)
40 }
41
42 if n.Output != nil {
43 *n.Output = result
44 }
45
46 return result, nil
47}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_read_data.go b/vendor/github.com/hashicorp/terraform/terraform/eval_read_data.go
new file mode 100644
index 0000000..fb85a28
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_read_data.go
@@ -0,0 +1,139 @@
1package terraform
2
3import (
4 "fmt"
5)
6
7// EvalReadDataDiff is an EvalNode implementation that executes a data
8// resource's ReadDataDiff method to discover what attributes it exports.
9type EvalReadDataDiff struct {
10 Provider *ResourceProvider
11 Output **InstanceDiff
12 OutputState **InstanceState
13 Config **ResourceConfig
14 Info *InstanceInfo
15
16 // Set Previous when re-evaluating diff during apply, to ensure that
17 // the "Destroy" flag is preserved.
18 Previous **InstanceDiff
19}
20
21func (n *EvalReadDataDiff) Eval(ctx EvalContext) (interface{}, error) {
22 // TODO: test
23
24 err := ctx.Hook(func(h Hook) (HookAction, error) {
25 return h.PreDiff(n.Info, nil)
26 })
27 if err != nil {
28 return nil, err
29 }
30
31 var diff *InstanceDiff
32
33 if n.Previous != nil && *n.Previous != nil && (*n.Previous).GetDestroy() {
34 // If we're re-diffing for a diff that was already planning to
35 // destroy, then we'll just continue with that plan.
36 diff = &InstanceDiff{Destroy: true}
37 } else {
38 provider := *n.Provider
39 config := *n.Config
40
41 var err error
42 diff, err = provider.ReadDataDiff(n.Info, config)
43 if err != nil {
44 return nil, err
45 }
46 if diff == nil {
47 diff = new(InstanceDiff)
48 }
49
50 // if id isn't explicitly set then it's always computed, because we're
51 // always "creating a new resource".
52 diff.init()
53 if _, ok := diff.Attributes["id"]; !ok {
54 diff.SetAttribute("id", &ResourceAttrDiff{
55 Old: "",
56 NewComputed: true,
57 RequiresNew: true,
58 Type: DiffAttrOutput,
59 })
60 }
61 }
62
63 err = ctx.Hook(func(h Hook) (HookAction, error) {
64 return h.PostDiff(n.Info, diff)
65 })
66 if err != nil {
67 return nil, err
68 }
69
70 *n.Output = diff
71
72 if n.OutputState != nil {
73 state := &InstanceState{}
74 *n.OutputState = state
75
76 // Apply the diff to the returned state, so the state includes
77 // any attribute values that are not computed.
78 if !diff.Empty() {
79 *n.OutputState = state.MergeDiff(diff)
80 }
81 }
82
83 return nil, nil
84}
85
86// EvalReadDataApply is an EvalNode implementation that executes a data
87// resource's ReadDataApply method to read data from the data source.
88type EvalReadDataApply struct {
89 Provider *ResourceProvider
90 Output **InstanceState
91 Diff **InstanceDiff
92 Info *InstanceInfo
93}
94
95func (n *EvalReadDataApply) Eval(ctx EvalContext) (interface{}, error) {
96 // TODO: test
97 provider := *n.Provider
98 diff := *n.Diff
99
100 // If the diff is for *destroying* this resource then we'll
101 // just drop its state and move on, since data resources don't
102 // support an actual "destroy" action.
103 if diff != nil && diff.GetDestroy() {
104 if n.Output != nil {
105 *n.Output = nil
106 }
107 return nil, nil
108 }
109
110 // For the purpose of external hooks we present a data apply as a
111 // "Refresh" rather than an "Apply" because creating a data source
112 // is presented to users/callers as a "read" operation.
113 err := ctx.Hook(func(h Hook) (HookAction, error) {
114 // We don't have a state yet, so we'll just give the hook an
115 // empty one to work with.
116 return h.PreRefresh(n.Info, &InstanceState{})
117 })
118 if err != nil {
119 return nil, err
120 }
121
122 state, err := provider.ReadDataApply(n.Info, diff)
123 if err != nil {
124 return nil, fmt.Errorf("%s: %s", n.Info.Id, err)
125 }
126
127 err = ctx.Hook(func(h Hook) (HookAction, error) {
128 return h.PostRefresh(n.Info, state)
129 })
130 if err != nil {
131 return nil, err
132 }
133
134 if n.Output != nil {
135 *n.Output = state
136 }
137
138 return nil, nil
139}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_refresh.go b/vendor/github.com/hashicorp/terraform/terraform/eval_refresh.go
new file mode 100644
index 0000000..fa2b812
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_refresh.go
@@ -0,0 +1,55 @@
1package terraform
2
3import (
4 "fmt"
5 "log"
6)
7
8// EvalRefresh is an EvalNode implementation that does a refresh for
9// a resource.
10type EvalRefresh struct {
11 Provider *ResourceProvider
12 State **InstanceState
13 Info *InstanceInfo
14 Output **InstanceState
15}
16
17// TODO: test
18func (n *EvalRefresh) Eval(ctx EvalContext) (interface{}, error) {
19 provider := *n.Provider
20 state := *n.State
21
22 // If we have no state, we don't do any refreshing
23 if state == nil {
24 log.Printf("[DEBUG] refresh: %s: no state, not refreshing", n.Info.Id)
25 return nil, nil
26 }
27
28 // Call pre-refresh hook
29 err := ctx.Hook(func(h Hook) (HookAction, error) {
30 return h.PreRefresh(n.Info, state)
31 })
32 if err != nil {
33 return nil, err
34 }
35
36 // Refresh!
37 state, err = provider.Refresh(n.Info, state)
38 if err != nil {
39 return nil, fmt.Errorf("%s: %s", n.Info.Id, err.Error())
40 }
41
42 // Call post-refresh hook
43 err = ctx.Hook(func(h Hook) (HookAction, error) {
44 return h.PostRefresh(n.Info, state)
45 })
46 if err != nil {
47 return nil, err
48 }
49
50 if n.Output != nil {
51 *n.Output = state
52 }
53
54 return nil, nil
55}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_resource.go b/vendor/github.com/hashicorp/terraform/terraform/eval_resource.go
new file mode 100644
index 0000000..5eca678
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_resource.go
@@ -0,0 +1,13 @@
1package terraform
2
3// EvalInstanceInfo is an EvalNode implementation that fills in the
4// InstanceInfo as much as it can.
5type EvalInstanceInfo struct {
6 Info *InstanceInfo
7}
8
9// TODO: test
10func (n *EvalInstanceInfo) Eval(ctx EvalContext) (interface{}, error) {
11 n.Info.ModulePath = ctx.Path()
12 return nil, nil
13}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_sequence.go b/vendor/github.com/hashicorp/terraform/terraform/eval_sequence.go
new file mode 100644
index 0000000..82d8178
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_sequence.go
@@ -0,0 +1,27 @@
1package terraform
2
3// EvalSequence is an EvalNode that evaluates in sequence.
4type EvalSequence struct {
5 Nodes []EvalNode
6}
7
8func (n *EvalSequence) Eval(ctx EvalContext) (interface{}, error) {
9 for _, n := range n.Nodes {
10 if n == nil {
11 continue
12 }
13
14 if _, err := EvalRaw(n, ctx); err != nil {
15 return nil, err
16 }
17 }
18
19 return nil, nil
20}
21
22// EvalNodeFilterable impl.
23func (n *EvalSequence) Filter(fn EvalNodeFilterFunc) {
24 for i, node := range n.Nodes {
25 n.Nodes[i] = fn(node)
26 }
27}
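A self-contained sketch of the EvalSequence behavior above, assuming a hypothetical EvalNode interface: nil children are skipped (which lets a filter "delete" a step), the first error aborts the sequence, and Filter rewrites children in place:

package main

import "fmt"

type EvalNode interface {
	Eval() error
}

type printNode string

func (p printNode) Eval() error {
	fmt.Println(string(p))
	return nil
}

type Sequence struct {
	Nodes []EvalNode
}

func (s *Sequence) Eval() error {
	for _, n := range s.Nodes {
		if n == nil {
			continue // a filtered-out step
		}
		if err := n.Eval(); err != nil {
			return err // first failure stops the sequence
		}
	}
	return nil
}

// Filter rewrites every child, mirroring the EvalNodeFilterable impl above.
func (s *Sequence) Filter(fn func(EvalNode) EvalNode) {
	for i, n := range s.Nodes {
		s.Nodes[i] = fn(n)
	}
}

func main() {
	s := &Sequence{Nodes: []EvalNode{printNode("a"), nil, printNode("b")}}
	_ = s.Eval() // prints "a" then "b"
}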
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_state.go b/vendor/github.com/hashicorp/terraform/terraform/eval_state.go
new file mode 100644
index 0000000..126a0e6
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_state.go
@@ -0,0 +1,324 @@
1package terraform
2
3import "fmt"
4
5// EvalReadState is an EvalNode implementation that reads the
6// primary InstanceState for a specific resource out of the state.
7type EvalReadState struct {
8 Name string
9 Output **InstanceState
10}
11
12func (n *EvalReadState) Eval(ctx EvalContext) (interface{}, error) {
13 return readInstanceFromState(ctx, n.Name, n.Output, func(rs *ResourceState) (*InstanceState, error) {
14 return rs.Primary, nil
15 })
16}
17
18// EvalReadStateDeposed is an EvalNode implementation that reads the
19// deposed InstanceState for a specific resource out of the state
20type EvalReadStateDeposed struct {
21 Name string
22 Output **InstanceState
23 // Index indicates which instance in the Deposed list to target, or -1 for
24 // the last item.
25 Index int
26}
27
28func (n *EvalReadStateDeposed) Eval(ctx EvalContext) (interface{}, error) {
29 return readInstanceFromState(ctx, n.Name, n.Output, func(rs *ResourceState) (*InstanceState, error) {
30 // Get the index. If it is negative, then we get the last one
31 idx := n.Index
32 if idx < 0 {
33 idx = len(rs.Deposed) - 1
34 }
35 if idx >= 0 && idx < len(rs.Deposed) {
36 return rs.Deposed[idx], nil
37 } else {
38 return nil, fmt.Errorf("bad deposed index: %d, for resource: %#v", idx, rs)
39 }
40 })
41}
42
43// Does the bulk of the work for the various flavors of ReadState eval nodes.
44// Each node just provides a reader function to get from the ResourceState to the
45// InstanceState, and this takes care of all the plumbing.
46func readInstanceFromState(
47 ctx EvalContext,
48 resourceName string,
49 output **InstanceState,
50 readerFn func(*ResourceState) (*InstanceState, error),
51) (*InstanceState, error) {
52 state, lock := ctx.State()
53
54 // Get a read lock so we can access this instance
55 lock.RLock()
56 defer lock.RUnlock()
57
58 // Look for the module state. If we don't have one, then it doesn't matter.
59 mod := state.ModuleByPath(ctx.Path())
60 if mod == nil {
61 return nil, nil
62 }
63
64 // Look for the resource state. If we don't have one, then it is okay.
65 rs := mod.Resources[resourceName]
66 if rs == nil {
67 return nil, nil
68 }
69
70 // Use the delegate function to get the instance state from the resource state
71 is, err := readerFn(rs)
72 if err != nil {
73 return nil, err
74 }
75
76 // Write the result to the output pointer
77 if output != nil {
78 *output = is
79 }
80
81 return is, nil
82}
83
84// EvalRequireState is an EvalNode implementation that early exits
85// if the state doesn't have an ID.
86type EvalRequireState struct {
87 State **InstanceState
88}
89
90func (n *EvalRequireState) Eval(ctx EvalContext) (interface{}, error) {
91 if n.State == nil {
92 return nil, EvalEarlyExitError{}
93 }
94
95 state := *n.State
96 if state == nil || state.ID == "" {
97 return nil, EvalEarlyExitError{}
98 }
99
100 return nil, nil
101}
102
103// EvalUpdateStateHook is an EvalNode implementation that calls the
104// PostStateUpdate hook with the current state.
105type EvalUpdateStateHook struct{}
106
107func (n *EvalUpdateStateHook) Eval(ctx EvalContext) (interface{}, error) {
108 state, lock := ctx.State()
109
110 // Get a full lock. Even calling something like WriteState can modify
111 // (prune) the state, so we need the full lock.
112 lock.Lock()
113 defer lock.Unlock()
114
115 // Call the hook
116 err := ctx.Hook(func(h Hook) (HookAction, error) {
117 return h.PostStateUpdate(state)
118 })
119 if err != nil {
120 return nil, err
121 }
122
123 return nil, nil
124}
125
126// EvalWriteState is an EvalNode implementation that writes the
127// primary InstanceState for a specific resource into the state.
128type EvalWriteState struct {
129 Name string
130 ResourceType string
131 Provider string
132 Dependencies []string
133 State **InstanceState
134}
135
136func (n *EvalWriteState) Eval(ctx EvalContext) (interface{}, error) {
137 return writeInstanceToState(ctx, n.Name, n.ResourceType, n.Provider, n.Dependencies,
138 func(rs *ResourceState) error {
139 rs.Primary = *n.State
140 return nil
141 },
142 )
143}
144
145// EvalWriteStateDeposed is an EvalNode implementation that writes
146// an InstanceState out to the Deposed list of a resource in the state.
147type EvalWriteStateDeposed struct {
148 Name string
149 ResourceType string
150 Provider string
151 Dependencies []string
152 State **InstanceState
153 // Index indicates which instance in the Deposed list to target, or -1 to append.
154 Index int
155}
156
157func (n *EvalWriteStateDeposed) Eval(ctx EvalContext) (interface{}, error) {
158 return writeInstanceToState(ctx, n.Name, n.ResourceType, n.Provider, n.Dependencies,
159 func(rs *ResourceState) error {
160 if n.Index == -1 {
161 rs.Deposed = append(rs.Deposed, *n.State)
162 } else {
163 rs.Deposed[n.Index] = *n.State
164 }
165 return nil
166 },
167 )
168}
169
170// Pulls together the common tasks of the EvalWriteState nodes. All the args
171// are passed directly down from the EvalNode along with a `writer` function
172// which is yielded the *ResourceState and is responsible for writing an
173// InstanceState to the proper field in the ResourceState.
174func writeInstanceToState(
175 ctx EvalContext,
176 resourceName string,
177 resourceType string,
178 provider string,
179 dependencies []string,
180 writerFn func(*ResourceState) error,
181) (*InstanceState, error) {
182 state, lock := ctx.State()
183 if state == nil {
184 return nil, fmt.Errorf("cannot write state to nil state")
185 }
186
187 // Get a write lock so we can access this instance
188 lock.Lock()
189 defer lock.Unlock()
190
191 // Look for the module state. If we don't have one, create it.
192 mod := state.ModuleByPath(ctx.Path())
193 if mod == nil {
194 mod = state.AddModule(ctx.Path())
195 }
196
197 // Look for the resource state.
198 rs := mod.Resources[resourceName]
199 if rs == nil {
200 rs = &ResourceState{}
201 rs.init()
202 mod.Resources[resourceName] = rs
203 }
204 rs.Type = resourceType
205 rs.Dependencies = dependencies
206 rs.Provider = provider
207
208 if err := writerFn(rs); err != nil {
209 return nil, err
210 }
211
212 return nil, nil
213}
214
215// EvalClearPrimaryState is an EvalNode implementation that clears the primary
216// instance from a resource state.
217type EvalClearPrimaryState struct {
218 Name string
219}
220
221func (n *EvalClearPrimaryState) Eval(ctx EvalContext) (interface{}, error) {
222 state, lock := ctx.State()
223
224 // Get a write lock since we will be modifying this instance
225 lock.Lock()
226 defer lock.Unlock()

227
228 // Look for the module state. If we don't have one, then it doesn't matter.
229 mod := state.ModuleByPath(ctx.Path())
230 if mod == nil {
231 return nil, nil
232 }
233
234 // Look for the resource state. If we don't have one, then it is okay.
235 rs := mod.Resources[n.Name]
236 if rs == nil {
237 return nil, nil
238 }
239
240 // Clear primary from the resource state
241 rs.Primary = nil
242
243 return nil, nil
244}
245
246// EvalDeposeState is an EvalNode implementation that takes the primary
247// out of a state and makes it Deposed. This is done at the beginning of
248// create-before-destroy calls so that the new instance can be created
249// while preserving the old state of the to-be-destroyed resource.
250type EvalDeposeState struct {
251 Name string
252}
253
254// TODO: test
255func (n *EvalDeposeState) Eval(ctx EvalContext) (interface{}, error) {
256 state, lock := ctx.State()
257
258 // Get a write lock since we will be modifying this instance
259 lock.Lock()
260 defer lock.Unlock()
261
262 // Look for the module state. If we don't have one, then it doesn't matter.
263 mod := state.ModuleByPath(ctx.Path())
264 if mod == nil {
265 return nil, nil
266 }
267
268 // Look for the resource state. If we don't have one, then it is okay.
269 rs := mod.Resources[n.Name]
270 if rs == nil {
271 return nil, nil
272 }
273
274 // If we don't have a primary, we have nothing to depose
275 if rs.Primary == nil {
276 return nil, nil
277 }
278
279 // Depose
280 rs.Deposed = append(rs.Deposed, rs.Primary)
281 rs.Primary = nil
282
283 return nil, nil
284}
285
286// EvalUndeposeState is an EvalNode implementation that restores the most
287// recently deposed InstanceState for a resource back into Primary.
288type EvalUndeposeState struct {
289 Name string
290 State **InstanceState
291}
292
293// TODO: test
294func (n *EvalUndeposeState) Eval(ctx EvalContext) (interface{}, error) {
295 state, lock := ctx.State()
296
297 // Get a write lock since we will be modifying this instance
298 lock.Lock()
299 defer lock.Unlock()
300
301 // Look for the module state. If we don't have one, then it doesn't matter.
302 mod := state.ModuleByPath(ctx.Path())
303 if mod == nil {
304 return nil, nil
305 }
306
307 // Look for the resource state. If we don't have one, then it is okay.
308 rs := mod.Resources[n.Name]
309 if rs == nil {
310 return nil, nil
311 }
312
313 // If we don't have any deposed resources, then there is nothing to do
314 if len(rs.Deposed) == 0 {
315 return nil, nil
316 }
317
318 // Undepose
319 idx := len(rs.Deposed) - 1
320 rs.Primary = rs.Deposed[idx]
321 rs.Deposed[idx] = *n.State
322
323 return nil, nil
324}
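The helpers readInstanceFromState and writeInstanceToState concentrate locking and lookup in one place, while each eval node supplies only a small delegate. A standalone sketch of the reader side, with simplified stand-ins for the state types:

package main

import (
	"fmt"
	"sync"
)

type Resource struct {
	Primary string
	Deposed []string
}

type State struct {
	mu        sync.RWMutex
	Resources map[string]*Resource
}

// readResource handles the locking and lookup; readerFn picks the field.
func readResource(s *State, name string, readerFn func(*Resource) (string, error)) (string, error) {
	s.mu.RLock()
	defer s.mu.RUnlock()

	rs, ok := s.Resources[name]
	if !ok {
		return "", nil // missing state is not an error
	}
	return readerFn(rs)
}

func main() {
	s := &State{Resources: map[string]*Resource{
		"web": {Primary: "i-123", Deposed: []string{"i-000"}},
	}}

	primary, _ := readResource(s, "web", func(r *Resource) (string, error) {
		return r.Primary, nil
	})
	lastDeposed, _ := readResource(s, "web", func(r *Resource) (string, error) {
		if len(r.Deposed) == 0 {
			return "", fmt.Errorf("no deposed instances")
		}
		return r.Deposed[len(r.Deposed)-1], nil
	})
	fmt.Println(primary, lastDeposed) // i-123 i-000
}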
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_validate.go b/vendor/github.com/hashicorp/terraform/terraform/eval_validate.go
new file mode 100644
index 0000000..478aa64
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_validate.go
@@ -0,0 +1,227 @@
1package terraform
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/terraform/config"
7 "github.com/mitchellh/mapstructure"
8)
9
10// EvalValidateError is the error structure returned if there were
11// validation errors.
12type EvalValidateError struct {
13 Warnings []string
14 Errors []error
15}
16
17func (e *EvalValidateError) Error() string {
18 return fmt.Sprintf("Warnings: %s. Errors: %s", e.Warnings, e.Errors)
19}
20
21// EvalValidateCount is an EvalNode implementation that validates
22// the count of a resource.
23type EvalValidateCount struct {
24 Resource *config.Resource
25}
26
27// TODO: test
28func (n *EvalValidateCount) Eval(ctx EvalContext) (interface{}, error) {
29 var count int
30 var errs []error
31 var err error
32 if _, err := ctx.Interpolate(n.Resource.RawCount, nil); err != nil {
33 errs = append(errs, fmt.Errorf(
34 "Failed to interpolate count: %s", err))
35 goto RETURN
36 }
37
38 count, err = n.Resource.Count()
39 if err != nil {
40 // If we can't get the count during validation, then
41 // just replace it with the number 1.
42 c := n.Resource.RawCount.Config()
43 c[n.Resource.RawCount.Key] = "1"
44 count = 1
45 }
46 err = nil
47
48 if count < 0 {
49 errs = append(errs, fmt.Errorf(
50 "Count is less than zero: %d", count))
51 }
52
53RETURN:
54 if len(errs) != 0 {
55 err = &EvalValidateError{
56 Errors: errs,
57 }
58 }
59 return nil, err
60}
61
62// EvalValidateProvider is an EvalNode implementation that validates
63// the configuration of a provider.
64type EvalValidateProvider struct {
65 Provider *ResourceProvider
66 Config **ResourceConfig
67}
68
69func (n *EvalValidateProvider) Eval(ctx EvalContext) (interface{}, error) {
70 provider := *n.Provider
71 config := *n.Config
72
73 warns, errs := provider.Validate(config)
74 if len(warns) == 0 && len(errs) == 0 {
75 return nil, nil
76 }
77
78 return nil, &EvalValidateError{
79 Warnings: warns,
80 Errors: errs,
81 }
82}
83
84// EvalValidateProvisioner is an EvalNode implementation that validates
85// the configuration of a provisioner.
86type EvalValidateProvisioner struct {
87 Provisioner *ResourceProvisioner
88 Config **ResourceConfig
89 ConnConfig **ResourceConfig
90}
91
92func (n *EvalValidateProvisioner) Eval(ctx EvalContext) (interface{}, error) {
93 provisioner := *n.Provisioner
94 config := *n.Config
95 var warns []string
96 var errs []error
97
98 {
99 // Validate the provisioner's own config first
100 w, e := provisioner.Validate(config)
101 warns = append(warns, w...)
102 errs = append(errs, e...)
103 }
104
105 {
106 // Now validate the connection config, which might either be from
107 // the provisioner block itself or inherited from the resource's
108 // shared connection info.
109 w, e := n.validateConnConfig(*n.ConnConfig)
110 warns = append(warns, w...)
111 errs = append(errs, e...)
112 }
113
114 if len(warns) == 0 && len(errs) == 0 {
115 return nil, nil
116 }
117
118 return nil, &EvalValidateError{
119 Warnings: warns,
120 Errors: errs,
121 }
122}
123
124func (n *EvalValidateProvisioner) validateConnConfig(connConfig *ResourceConfig) (warns []string, errs []error) {
125 // We can't comprehensively validate the connection config since its
126 // final structure is decided by the communicator and we can't instantiate
127 // that until we have a complete instance state. However, we *can* catch
128 // configuration keys that are not valid for *any* communicator, catching
129 // typos early rather than waiting until we actually try to run one of
130 // the resource's provisioners.
131
132 type connConfigSuperset struct {
133 // All attribute types are interface{} here because at this point we
134 // may still have unresolved interpolation expressions, which will
135 // appear as strings regardless of the final goal type.
136
137 Type interface{} `mapstructure:"type"`
138 User interface{} `mapstructure:"user"`
139 Password interface{} `mapstructure:"password"`
140 Host interface{} `mapstructure:"host"`
141 Port interface{} `mapstructure:"port"`
142 Timeout interface{} `mapstructure:"timeout"`
143 ScriptPath interface{} `mapstructure:"script_path"`
144
145 // For type=ssh only (enforced in ssh communicator)
146 PrivateKey interface{} `mapstructure:"private_key"`
147 Agent interface{} `mapstructure:"agent"`
148 BastionHost interface{} `mapstructure:"bastion_host"`
149 BastionPort interface{} `mapstructure:"bastion_port"`
150 BastionUser interface{} `mapstructure:"bastion_user"`
151 BastionPassword interface{} `mapstructure:"bastion_password"`
152 BastionPrivateKey interface{} `mapstructure:"bastion_private_key"`
153
154 // For type=winrm only (enforced in winrm communicator)
155 HTTPS interface{} `mapstructure:"https"`
156 Insecure interface{} `mapstructure:"insecure"`
157 CACert interface{} `mapstructure:"cacert"`
158 }
159
160 var metadata mapstructure.Metadata
161 decoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
162 Metadata: &metadata,
163 Result: &connConfigSuperset{}, // result is disregarded; we only care about unused keys
164 })
165 if err != nil {
166 // should never happen
167 errs = append(errs, err)
168 return
169 }
170
171 if err := decoder.Decode(connConfig.Config); err != nil {
172 errs = append(errs, err)
173 return
174 }
175
176 for _, attrName := range metadata.Unused {
177 errs = append(errs, fmt.Errorf("unknown 'connection' argument %q", attrName))
178 }
179 return
180}
181
182// EvalValidateResource is an EvalNode implementation that validates
183// the configuration of a resource.
184type EvalValidateResource struct {
185 Provider *ResourceProvider
186 Config **ResourceConfig
187 ResourceName string
188 ResourceType string
189 ResourceMode config.ResourceMode
190
191 // IgnoreWarnings means that warnings will not be passed through. This allows
192 // "just-in-time" passes of validation to continue execution through warnings.
193 IgnoreWarnings bool
194}
195
196func (n *EvalValidateResource) Eval(ctx EvalContext) (interface{}, error) {
197 provider := *n.Provider
198 cfg := *n.Config
199 var warns []string
200 var errs []error
201 // Provider entry point varies depending on resource mode, because
202 // managed resources and data resources are two distinct concepts
203 // in the provider abstraction.
204 switch n.ResourceMode {
205 case config.ManagedResourceMode:
206 warns, errs = provider.ValidateResource(n.ResourceType, cfg)
207 case config.DataResourceMode:
208 warns, errs = provider.ValidateDataSource(n.ResourceType, cfg)
209 }
210
211 // If the resource name doesn't match the name regular
212 // expression, show an error.
213 if !config.NameRegexp.Match([]byte(n.ResourceName)) {
214 errs = append(errs, fmt.Errorf(
215 "%s: resource name can only contain letters, numbers, "+
216 "dashes, and underscores.", n.ResourceName))
217 }
218
219 if (len(warns) == 0 || n.IgnoreWarnings) && len(errs) == 0 {
220 return nil, nil
221 }
222
223 return nil, &EvalValidateError{
224 Warnings: warns,
225 Errors: errs,
226 }
227}
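The unused-key technique in validateConnConfig generalizes well: decode the user's map into a superset struct and treat any key mapstructure reports as unused as a probable typo. A runnable sketch with an abbreviated superset struct (the field set here is illustrative, not the full list above):

package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

type connSuperset struct {
	Type interface{} `mapstructure:"type"`
	User interface{} `mapstructure:"user"`
	Host interface{} `mapstructure:"host"`
}

// unknownKeys returns the keys in raw that matched no struct field.
func unknownKeys(raw map[string]interface{}) ([]string, error) {
	var md mapstructure.Metadata
	dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
		Metadata: &md,
		Result:   &connSuperset{}, // decoded value is discarded
	})
	if err != nil {
		return nil, err
	}
	if err := dec.Decode(raw); err != nil {
		return nil, err
	}
	return md.Unused, nil
}

func main() {
	bad, _ := unknownKeys(map[string]interface{}{
		"type": "ssh",
		"usre": "root", // typo: reported as an unused key
	})
	fmt.Println(bad) // [usre]
}

This catches misspellings early without needing to know the final communicator, which is exactly why the code above validates against a superset of all communicators' arguments.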
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_validate_selfref.go b/vendor/github.com/hashicorp/terraform/terraform/eval_validate_selfref.go
new file mode 100644
index 0000000..ae4436a
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_validate_selfref.go
@@ -0,0 +1,74 @@
1package terraform
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/terraform/config"
7)
8
9// EvalValidateResourceSelfRef is an EvalNode implementation that validates that
10// a configuration doesn't contain a reference to the resource itself.
11//
12// This must be done prior to interpolating configuration in order to avoid
13// any infinite loop scenarios.
14type EvalValidateResourceSelfRef struct {
15 Addr **ResourceAddress
16 Config **config.RawConfig
17}
18
19func (n *EvalValidateResourceSelfRef) Eval(ctx EvalContext) (interface{}, error) {
20 addr := *n.Addr
21 conf := *n.Config
22
23 // Go through the variables and find self references
24 var errs []error
25 for k, raw := range conf.Variables {
26 rv, ok := raw.(*config.ResourceVariable)
27 if !ok {
28 continue
29 }
30
31 // Build an address from the variable
32 varAddr := &ResourceAddress{
33 Path: addr.Path,
34 Mode: rv.Mode,
35 Type: rv.Type,
36 Name: rv.Name,
37 Index: rv.Index,
38 InstanceType: TypePrimary,
39 }
40
41 // If the variable access is a multi-access (*), then we just
42 // match the index so that we'll match our own addr if everything
43 // else matches.
44 if rv.Multi && rv.Index == -1 {
45 varAddr.Index = addr.Index
46 }
47
48 // This is a weird thing where ResourceAddress has index "-1" when
49 // index isn't set at all. This means index "0" for resource access.
50 // So, if we have this scenario, just set our varAddr to -1 so it
51 // matches.
52 if addr.Index == -1 && varAddr.Index == 0 {
53 varAddr.Index = -1
54 }
55
56 // If the addresses match, then this is a self reference
57 if varAddr.Equals(addr) && varAddr.Index == addr.Index {
58 errs = append(errs, fmt.Errorf(
59 "%s: self reference not allowed: %q",
60 addr, k))
61 }
62 }
63
64 // If no errors, no errors!
65 if len(errs) == 0 {
66 return nil, nil
67 }
68
69 // Wrap the errors in the proper wrapper so we can handle validation
70 // formatting properly upstream.
71 return nil, &EvalValidateError{
72 Errors: errs,
73 }
74}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_variable.go b/vendor/github.com/hashicorp/terraform/terraform/eval_variable.go
new file mode 100644
index 0000000..e39a33c
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_variable.go
@@ -0,0 +1,279 @@
1package terraform
2
3import (
4 "fmt"
5 "log"
6 "reflect"
7 "strconv"
8 "strings"
9
10 "github.com/hashicorp/terraform/config"
11 "github.com/hashicorp/terraform/config/module"
12 "github.com/hashicorp/terraform/helper/hilmapstructure"
13)
14
15// EvalTypeCheckVariable is an EvalNode which ensures that the variable
16// values which are assigned as inputs to a module (including the root)
17// match the types which are either declared for the variables explicitly
18// or inferred from the default values.
19//
20// In order to achieve this three things are required:
21// - a map of the proposed variable values
22// - the configuration tree of the module in which the variable is
23// declared
24// - the path to the module (so we know which part of the tree to
25// compare the values against).
26type EvalTypeCheckVariable struct {
27 Variables map[string]interface{}
28 ModulePath []string
29 ModuleTree *module.Tree
30}
31
32func (n *EvalTypeCheckVariable) Eval(ctx EvalContext) (interface{}, error) {
33 currentTree := n.ModuleTree
34 for _, pathComponent := range n.ModulePath[1:] {
35 currentTree = currentTree.Children()[pathComponent]
36 }
37 targetConfig := currentTree.Config()
38
39 prototypes := make(map[string]config.VariableType)
40 for _, variable := range targetConfig.Variables {
41 prototypes[variable.Name] = variable.Type()
42 }
43
44 // Only display a module in an error message if we are not in the root module
45 modulePathDescription := fmt.Sprintf(" in module %s", strings.Join(n.ModulePath[1:], "."))
46 if len(n.ModulePath) == 1 {
47 modulePathDescription = ""
48 }
49
50 for name, declaredType := range prototypes {
51 proposedValue, ok := n.Variables[name]
52 if !ok {
53 // This means the default value should be used as no overriding value
54 // has been set. Therefore we should continue as no check is necessary.
55 continue
56 }
57
58 if proposedValue == config.UnknownVariableValue {
59 continue
60 }
61
62 switch declaredType {
63 case config.VariableTypeString:
64 switch proposedValue.(type) {
65 case string:
66 continue
67 default:
68 return nil, fmt.Errorf("variable %s%s should be type %s, got %s",
69 name, modulePathDescription, declaredType.Printable(), hclTypeName(proposedValue))
70 }
71 case config.VariableTypeMap:
72 switch proposedValue.(type) {
73 case map[string]interface{}:
74 continue
75 default:
76 return nil, fmt.Errorf("variable %s%s should be type %s, got %s",
77 name, modulePathDescription, declaredType.Printable(), hclTypeName(proposedValue))
78 }
79 case config.VariableTypeList:
80 switch proposedValue.(type) {
81 case []interface{}:
82 continue
83 default:
84 return nil, fmt.Errorf("variable %s%s should be type %s, got %s",
85 name, modulePathDescription, declaredType.Printable(), hclTypeName(proposedValue))
86 }
87 default:
88 return nil, fmt.Errorf("variable %s%s should be type %s, got type string",
89 name, modulePathDescription, declaredType.Printable())
90 }
91 }
92
93 return nil, nil
94}
95
96// EvalSetVariables is an EvalNode implementation that sets the variables
97// explicitly for interpolation later.
98type EvalSetVariables struct {
99 Module *string
100 Variables map[string]interface{}
101}
102
103// TODO: test
104func (n *EvalSetVariables) Eval(ctx EvalContext) (interface{}, error) {
105 ctx.SetVariables(*n.Module, n.Variables)
106 return nil, nil
107}
108
109// EvalVariableBlock is an EvalNode implementation that evaluates the
110// given configuration, and uses the final values as a way to set the
111// mapping.
112type EvalVariableBlock struct {
113 Config **ResourceConfig
114 VariableValues map[string]interface{}
115}
116
117func (n *EvalVariableBlock) Eval(ctx EvalContext) (interface{}, error) {
118 // Clear out the existing mapping
119 for k := range n.VariableValues {
120 delete(n.VariableValues, k)
121 }
122
123 // Get our configuration
124 rc := *n.Config
125 for k, v := range rc.Config {
126 vKind := reflect.ValueOf(v).Type().Kind()
127
128 switch vKind {
129 case reflect.Slice:
130 var vSlice []interface{}
131 if err := hilmapstructure.WeakDecode(v, &vSlice); err == nil {
132 n.VariableValues[k] = vSlice
133 continue
134 }
135 case reflect.Map:
136 var vMap map[string]interface{}
137 if err := hilmapstructure.WeakDecode(v, &vMap); err == nil {
138 n.VariableValues[k] = vMap
139 continue
140 }
141 default:
142 var vString string
143 if err := hilmapstructure.WeakDecode(v, &vString); err == nil {
144 n.VariableValues[k] = vString
145 continue
146 }
147 }
148
149 return nil, fmt.Errorf("Variable value for %s is not a string, list or map type", k)
150 }
151
152 for _, path := range rc.ComputedKeys {
153 log.Printf("[DEBUG] Setting Unknown Variable Value for computed key: %s", path)
154 err := n.setUnknownVariableValueForPath(path)
155 if err != nil {
156 return nil, err
157 }
158 }
159
160 return nil, nil
161}
162
163func (n *EvalVariableBlock) setUnknownVariableValueForPath(path string) error {
164 pathComponents := strings.Split(path, ".")
165
166 if len(pathComponents) < 1 {
167 return fmt.Errorf("No path components in %s", path)
168 }
169
170 if len(pathComponents) == 1 {
171 // Special case the "top level" since we know the type
172 if _, ok := n.VariableValues[pathComponents[0]]; !ok {
173 n.VariableValues[pathComponents[0]] = config.UnknownVariableValue
174 }
175 return nil
176 }
177
178 // Otherwise find the correct point in the tree and then set to unknown
179 var current interface{} = n.VariableValues[pathComponents[0]]
180 for i := 1; i < len(pathComponents); i++ {
181 switch tCurrent := current.(type) {
182 case []interface{}:
183 index, err := strconv.Atoi(pathComponents[i])
184 if err != nil {
185 return fmt.Errorf("Cannot convert %s to slice index in path %s",
186 pathComponents[i], path)
187 }
188 current = tCurrent[index]
189 case []map[string]interface{}:
190 index, err := strconv.Atoi(pathComponents[i])
191 if err != nil {
192 return fmt.Errorf("Cannot convert %s to slice index in path %s",
193 pathComponents[i], path)
194 }
195 current = tCurrent[index]
196 case map[string]interface{}:
197 if val, hasVal := tCurrent[pathComponents[i]]; hasVal {
198 current = val
199 continue
200 }
201
202 tCurrent[pathComponents[i]] = config.UnknownVariableValue
203 break
204 }
205 }
206
207 return nil
208}
209
210// EvalCoerceMapVariable is an EvalNode implementation that recognizes a
211// specific ambiguous HCL parsing situation and resolves it. In HCL parsing, a
212// bare map literal is indistinguishable from a list of maps with one element.
213//
214// We take all the same inputs as EvalTypeCheckVariable above, since we need
215// both the target type and the proposed value in order to properly coerce.
216type EvalCoerceMapVariable struct {
217 Variables map[string]interface{}
218 ModulePath []string
219 ModuleTree *module.Tree
220}
221
222// Eval implements the EvalNode interface. See EvalCoerceMapVariable for
223// details.
224func (n *EvalCoerceMapVariable) Eval(ctx EvalContext) (interface{}, error) {
225 currentTree := n.ModuleTree
226 for _, pathComponent := range n.ModulePath[1:] {
227 currentTree = currentTree.Children()[pathComponent]
228 }
229 targetConfig := currentTree.Config()
230
231 prototypes := make(map[string]config.VariableType)
232 for _, variable := range targetConfig.Variables {
233 prototypes[variable.Name] = variable.Type()
234 }
235
236 for name, declaredType := range prototypes {
237 if declaredType != config.VariableTypeMap {
238 continue
239 }
240
241 proposedValue, ok := n.Variables[name]
242 if !ok {
243 continue
244 }
245
246 if list, ok := proposedValue.([]interface{}); ok && len(list) == 1 {
247 if m, ok := list[0].(map[string]interface{}); ok {
248 log.Printf("[DEBUG] EvalCoerceMapVariable: "+
249 "Coercing single element list into map: %#v", m)
250 n.Variables[name] = m
251 }
252 }
253 }
254
255 return nil, nil
256}
257
258// hclTypeName returns the name of the type that would represent this value in
259// a config file, or falls back to the Go type name if there's no corresponding
260// HCL type. This is used for formatted output, not for comparing types.
261func hclTypeName(i interface{}) string {
262 switch k := reflect.Indirect(reflect.ValueOf(i)).Kind(); k {
263 case reflect.Bool:
264 return "boolean"
265 case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
266 reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
267 reflect.Uint64, reflect.Uintptr, reflect.Float32, reflect.Float64:
268 return "number"
269 case reflect.Array, reflect.Slice:
270 return "list"
271 case reflect.Map:
272 return "map"
273 case reflect.String:
274 return "string"
275 default:
276 // fall back to the Go type if there's no match
277 return k.String()
278 }
279}
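A sketch of the unwrapping EvalCoerceMapVariable performs; the helper below is hypothetical but mirrors the single-element-list check above:

package main

import "fmt"

// coerceMap unwraps a single-element list of maps into the map itself,
// since HCL parses a bare map literal and such a list identically.
func coerceMap(v interface{}) interface{} {
	if list, ok := v.([]interface{}); ok && len(list) == 1 {
		if m, ok := list[0].(map[string]interface{}); ok {
			return m
		}
	}
	return v
}

func main() {
	parsed := []interface{}{map[string]interface{}{"region": "us-east-1"}}
	fmt.Printf("%#v\n", coerceMap(parsed)) // map[string]interface {}{"region":"us-east-1"}
}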
diff --git a/vendor/github.com/hashicorp/terraform/terraform/evaltree_provider.go b/vendor/github.com/hashicorp/terraform/terraform/evaltree_provider.go
new file mode 100644
index 0000000..00392ef
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/evaltree_provider.go
@@ -0,0 +1,119 @@
1package terraform
2
3import (
4 "github.com/hashicorp/terraform/config"
5)
6
7// ProviderEvalTree returns the evaluation tree for initializing and
8// configuring providers.
9func ProviderEvalTree(n string, config *config.RawConfig) EvalNode {
10 var provider ResourceProvider
11 var resourceConfig *ResourceConfig
12
13 seq := make([]EvalNode, 0, 5)
14 seq = append(seq, &EvalInitProvider{Name: n})
15
16 // Input stuff
17 seq = append(seq, &EvalOpFilter{
18 Ops: []walkOperation{walkInput, walkImport},
19 Node: &EvalSequence{
20 Nodes: []EvalNode{
21 &EvalGetProvider{
22 Name: n,
23 Output: &provider,
24 },
25 &EvalInterpolate{
26 Config: config,
27 Output: &resourceConfig,
28 },
29 &EvalBuildProviderConfig{
30 Provider: n,
31 Config: &resourceConfig,
32 Output: &resourceConfig,
33 },
34 &EvalInputProvider{
35 Name: n,
36 Provider: &provider,
37 Config: &resourceConfig,
38 },
39 },
40 },
41 })
42
43 seq = append(seq, &EvalOpFilter{
44 Ops: []walkOperation{walkValidate},
45 Node: &EvalSequence{
46 Nodes: []EvalNode{
47 &EvalGetProvider{
48 Name: n,
49 Output: &provider,
50 },
51 &EvalInterpolate{
52 Config: config,
53 Output: &resourceConfig,
54 },
55 &EvalBuildProviderConfig{
56 Provider: n,
57 Config: &resourceConfig,
58 Output: &resourceConfig,
59 },
60 &EvalValidateProvider{
61 Provider: &provider,
62 Config: &resourceConfig,
63 },
64 &EvalSetProviderConfig{
65 Provider: n,
66 Config: &resourceConfig,
67 },
68 },
69 },
70 })
71
72 // Apply stuff
73 seq = append(seq, &EvalOpFilter{
74 Ops: []walkOperation{walkRefresh, walkPlan, walkApply, walkDestroy, walkImport},
75 Node: &EvalSequence{
76 Nodes: []EvalNode{
77 &EvalGetProvider{
78 Name: n,
79 Output: &provider,
80 },
81 &EvalInterpolate{
82 Config: config,
83 Output: &resourceConfig,
84 },
85 &EvalBuildProviderConfig{
86 Provider: n,
87 Config: &resourceConfig,
88 Output: &resourceConfig,
89 },
90 &EvalSetProviderConfig{
91 Provider: n,
92 Config: &resourceConfig,
93 },
94 },
95 },
96 })
97
98 // We configure on everything but validate, since validate may
99 // not have access to all the variables.
100 seq = append(seq, &EvalOpFilter{
101 Ops: []walkOperation{walkRefresh, walkPlan, walkApply, walkDestroy, walkImport},
102 Node: &EvalSequence{
103 Nodes: []EvalNode{
104 &EvalConfigProvider{
105 Provider: n,
106 Config: &resourceConfig,
107 },
108 },
109 },
110 })
111
112 return &EvalSequence{Nodes: seq}
113}
114
115// CloseProviderEvalTree returns the evaluation tree for closing
116// provider connections that aren't needed anymore.
117func CloseProviderEvalTree(n string) EvalNode {
118 return &EvalCloseProvider{Name: n}
119}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph.go b/vendor/github.com/hashicorp/terraform/terraform/graph.go
new file mode 100644
index 0000000..48ce6a3
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph.go
@@ -0,0 +1,172 @@
1package terraform
2
3import (
4 "fmt"
5 "log"
6 "runtime/debug"
7 "strings"
8
9 "github.com/hashicorp/terraform/dag"
10)
11
12// RootModuleName is the name given to the root module implicitly.
13const RootModuleName = "root"
14
15// RootModulePath is the path for the root module.
16var RootModulePath = []string{RootModuleName}
17
18// Graph represents the graph that Terraform uses to represent resources
19// and their dependencies.
20type Graph struct {
21 // Graph is the actual DAG. This is embedded so you can call the DAG
22 // methods directly.
23 dag.AcyclicGraph
24
25 // Path is the path in the module tree that this Graph represents.
26 // The root is represented by a single element list containing
27 // RootModuleName
28 Path []string
29
30 // debugName is a name for reference in the debug output. This is usually
31 // to indicate what the topmost builder was, and whether this graph is a
32 // shadow or not.
33 debugName string
34}
35
36func (g *Graph) DirectedGraph() dag.Grapher {
37 return &g.AcyclicGraph
38}
39
40// Walk walks the graph with the given walker for callbacks. The graph
41// will be walked with full parallelism, so the walker should expect
42// to be called concurrently.
43func (g *Graph) Walk(walker GraphWalker) error {
44 return g.walk(walker)
45}
46
47func (g *Graph) walk(walker GraphWalker) error {
48 // The callbacks for enter/exiting a graph
49 ctx := walker.EnterPath(g.Path)
50 defer walker.ExitPath(g.Path)
51
52 // Get the path for logs
53 path := strings.Join(ctx.Path(), ".")
54
55 // Determine if our walker is a panic wrapper
56 panicwrap, ok := walker.(GraphWalkerPanicwrapper)
57 if !ok {
58 panicwrap = nil // just to be sure
59 }
60
61 debugName := "walk-graph.json"
62 if g.debugName != "" {
63 debugName = g.debugName + "-" + debugName
64 }
65
66 debugBuf := dbug.NewFileWriter(debugName)
67 g.SetDebugWriter(debugBuf)
68 defer debugBuf.Close()
69
70 // Walk the graph.
71 var walkFn dag.WalkFunc
72 walkFn = func(v dag.Vertex) (rerr error) {
73 log.Printf("[DEBUG] vertex '%s.%s': walking", path, dag.VertexName(v))
74 g.DebugVisitInfo(v, g.debugName)
75
76 // If we have a panic wrap GraphWalker and a panic occurs, recover
77 // and call that. We ensure the return value is an error, however,
78 // so that future nodes are not called.
79 defer func() {
80 // If no panicwrap, do nothing
81 if panicwrap == nil {
82 return
83 }
84
85 // If no panic, do nothing
86 err := recover()
87 if err == nil {
88 return
89 }
90
91 // Modify the return value to show the error
92 rerr = fmt.Errorf("vertex %q captured panic: %s\n\n%s",
93 dag.VertexName(v), err, debug.Stack())
94
95 // Call the panic wrapper
96 panicwrap.Panic(v, err)
97 }()
98
99 walker.EnterVertex(v)
100 defer walker.ExitVertex(v, rerr)
101
102 // vertexCtx is the context that we use when evaluating. This
103 // is normally the context of our graph but can be overridden
104 // with a GraphNodeSubPath impl.
105 vertexCtx := ctx
106 if pn, ok := v.(GraphNodeSubPath); ok && len(pn.Path()) > 0 {
107 vertexCtx = walker.EnterPath(normalizeModulePath(pn.Path()))
108 defer walker.ExitPath(pn.Path())
109 }
110
111 // If the node is eval-able, then evaluate it.
112 if ev, ok := v.(GraphNodeEvalable); ok {
113 tree := ev.EvalTree()
114 if tree == nil {
115 panic(fmt.Sprintf(
116 "%s.%s (%T): nil eval tree", path, dag.VertexName(v), v))
117 }
118
119 // Allow the walker to change our tree if needed. Eval,
120 // then callback with the output.
121 log.Printf("[DEBUG] vertex '%s.%s': evaluating", path, dag.VertexName(v))
122
123 g.DebugVertexInfo(v, fmt.Sprintf("evaluating %T(%s)", v, path))
124
125 tree = walker.EnterEvalTree(v, tree)
126 output, err := Eval(tree, vertexCtx)
127 if rerr = walker.ExitEvalTree(v, output, err); rerr != nil {
128 return
129 }
130 }
131
132 // If the node is dynamically expanded, then expand it
133 if ev, ok := v.(GraphNodeDynamicExpandable); ok {
134 log.Printf(
135 "[DEBUG] vertex '%s.%s': expanding/walking dynamic subgraph",
136 path,
137 dag.VertexName(v))
138
139 g.DebugVertexInfo(v, fmt.Sprintf("expanding %T(%s)", v, path))
140
141 g, err := ev.DynamicExpand(vertexCtx)
142 if err != nil {
143 rerr = err
144 return
145 }
146 if g != nil {
147 // Walk the subgraph
148 if rerr = g.walk(walker); rerr != nil {
149 return
150 }
151 }
152 }
153
154 // If the node has a subgraph, then walk the subgraph
155 if sn, ok := v.(GraphNodeSubgraph); ok {
156 log.Printf(
157 "[DEBUG] vertex '%s.%s': walking subgraph",
158 path,
159 dag.VertexName(v))
160
161 g.DebugVertexInfo(v, fmt.Sprintf("subgraph: %T(%s)", v, path))
162
163 if rerr = sn.Subgraph().(*Graph).walk(walker); rerr != nil {
164 return
165 }
166 }
167
168 return nil
169 }
170
171 return g.AcyclicGraph.Walk(walkFn)
172}
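The panic handling in Graph.walk relies on a named return value: a deferred recover rewrites the walk function's result so a panicking vertex surfaces as an error, which halts dependent vertices without crashing the whole walk. A minimal sketch of the idiom:

package main

import (
	"fmt"
	"runtime/debug"
)

// visit converts a panic in fn into an error by mutating the named
// return value rerr from a deferred function.
func visit(name string, fn func()) (rerr error) {
	defer func() {
		if p := recover(); p != nil {
			rerr = fmt.Errorf("vertex %q captured panic: %v\n\n%s",
				name, p, debug.Stack())
		}
	}()
	fn()
	return nil
}

func main() {
	err := visit("aws_instance.web", func() { panic("nil eval tree") })
	fmt.Println(err != nil) // true: the panic became an ordinary error
}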
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder.go
new file mode 100644
index 0000000..6374bb9
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder.go
@@ -0,0 +1,77 @@
1package terraform
2
3import (
4 "fmt"
5 "log"
6 "strings"
7)
8
9// GraphBuilder is an interface that can be implemented and used with
10// Terraform to build the graph that Terraform walks.
11type GraphBuilder interface {
12 // Build builds the graph for the given module path. It is up to
13 // the interface implementation whether this build should expand
14 // the graph or not.
15 Build(path []string) (*Graph, error)
16}
17
18// BasicGraphBuilder is a GraphBuilder that builds a graph out of a
19// series of transforms and (optionally) validates the graph is a valid
20// structure.
21type BasicGraphBuilder struct {
22 Steps []GraphTransformer
23 Validate bool
24 // Optional name to add to the graph debug log
25 Name string
26}
27
28func (b *BasicGraphBuilder) Build(path []string) (*Graph, error) {
29 g := &Graph{Path: path}
30
31 debugName := "graph.json"
32 if b.Name != "" {
33 debugName = b.Name + "-" + debugName
34 }
35 debugBuf := dbug.NewFileWriter(debugName)
36 g.SetDebugWriter(debugBuf)
37 defer debugBuf.Close()
38
39 for _, step := range b.Steps {
40 if step == nil {
41 continue
42 }
43
44 stepName := fmt.Sprintf("%T", step)
45 dot := strings.LastIndex(stepName, ".")
46 if dot >= 0 {
47 stepName = stepName[dot+1:]
48 }
49
50 debugOp := g.DebugOperation(stepName, "")
51 err := step.Transform(g)
52
53 errMsg := ""
54 if err != nil {
55 errMsg = err.Error()
56 }
57 debugOp.End(errMsg)
58
59 log.Printf(
60 "[TRACE] Graph after step %T:\n\n%s",
61 step, g.StringWithNodeTypes())
62
63 if err != nil {
64 return g, err
65 }
66 }
67
68 // Validate the graph structure
69 if b.Validate {
70 if err := g.Validate(); err != nil {
71 log.Printf("[ERROR] Graph validation failed. Graph:\n\n%s", g.String())
72 return nil, err
73 }
74 }
75
76 return g, nil
77}
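BasicGraphBuilder is, at heart, a pipeline: each transformer mutates the graph in place, nil steps are skipped, and the step's type name (trimmed at the last dot, as above) labels the trace output. A standalone sketch with stand-in Graph and transformer types:

package main

import (
	"fmt"
	"strings"
)

type Graph struct{ Nodes []string }

type Transformer interface {
	Transform(*Graph) error
}

type AddRoot struct{}

func (AddRoot) Transform(g *Graph) error {
	g.Nodes = append(g.Nodes, "root")
	return nil
}

func build(steps []Transformer) (*Graph, error) {
	g := &Graph{}
	for _, step := range steps {
		if step == nil {
			continue
		}
		// Derive a short step name from the type, e.g. "main.AddRoot" -> "AddRoot".
		name := fmt.Sprintf("%T", step)
		if dot := strings.LastIndex(name, "."); dot >= 0 {
			name = name[dot+1:]
		}
		if err := step.Transform(g); err != nil {
			return g, fmt.Errorf("step %s: %s", name, err)
		}
		fmt.Printf("graph after %s: %v\n", name, g.Nodes)
	}
	return g, nil
}

func main() {
	g, _ := build([]Transformer{nil, AddRoot{}})
	fmt.Println(g.Nodes) // [root]
}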
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_apply.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_apply.go
new file mode 100644
index 0000000..38a90f2
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_apply.go
@@ -0,0 +1,141 @@
1package terraform
2
3import (
4 "github.com/hashicorp/terraform/config/module"
5 "github.com/hashicorp/terraform/dag"
6)
7
8// ApplyGraphBuilder implements GraphBuilder and is responsible for building
9// a graph for applying a Terraform diff.
10//
11// Because the graph is built from the diff (vs. the config or state),
12// this helps ensure that the apply-time graph doesn't modify any resources
13// that aren't explicitly in the diff. There are other scenarios where the
14// apply can deviate from the diff, so this is just one layer of protection.
15type ApplyGraphBuilder struct {
16 // Module is the root module for the graph to build.
17 Module *module.Tree
18
19 // Diff is the diff to apply.
20 Diff *Diff
21
22 // State is the current state
23 State *State
24
25 // Providers is the list of providers supported.
26 Providers []string
27
28 // Provisioners is the list of provisioners supported.
29 Provisioners []string
30
31 // Targets are resources to target. This is only required to make sure
32 // unnecessary outputs aren't included in the apply graph. The plan
33 // builder successfully handles targeting resources. In the future,
34 // outputs should go into the diff so that this is unnecessary.
35 Targets []string
36
37 // DisableReduce, if true, will not reduce the graph. Great for testing.
38 DisableReduce bool
39
40 // Destroy, if true, represents a pure destroy operation
41 Destroy bool
42
43 // Validate will do structural validation of the graph.
44 Validate bool
45}
46
47// See GraphBuilder
48func (b *ApplyGraphBuilder) Build(path []string) (*Graph, error) {
49 return (&BasicGraphBuilder{
50 Steps: b.Steps(),
51 Validate: b.Validate,
52 Name: "ApplyGraphBuilder",
53 }).Build(path)
54}
55
56// See GraphBuilder
57func (b *ApplyGraphBuilder) Steps() []GraphTransformer {
58 // Custom factory for creating providers.
59 concreteProvider := func(a *NodeAbstractProvider) dag.Vertex {
60 return &NodeApplyableProvider{
61 NodeAbstractProvider: a,
62 }
63 }
64
65 concreteResource := func(a *NodeAbstractResource) dag.Vertex {
66 return &NodeApplyableResource{
67 NodeAbstractResource: a,
68 }
69 }
70
71 steps := []GraphTransformer{
72 // Creates all the nodes represented in the diff.
73 &DiffTransformer{
74 Concrete: concreteResource,
75
76 Diff: b.Diff,
77 Module: b.Module,
78 State: b.State,
79 },
80
81 // Create orphan output nodes
82 &OrphanOutputTransformer{Module: b.Module, State: b.State},
83
84 // Attach the configuration to any resources
85 &AttachResourceConfigTransformer{Module: b.Module},
86
87 // Attach the state
88 &AttachStateTransformer{State: b.State},
89
90 // Create all the providers
91 &MissingProviderTransformer{Providers: b.Providers, Concrete: concreteProvider},
92 &ProviderTransformer{},
93 &DisableProviderTransformer{},
94 &ParentProviderTransformer{},
95 &AttachProviderConfigTransformer{Module: b.Module},
96
97 // Destruction ordering
98 &DestroyEdgeTransformer{Module: b.Module, State: b.State},
99 GraphTransformIf(
100 func() bool { return !b.Destroy },
101 &CBDEdgeTransformer{Module: b.Module, State: b.State},
102 ),
103
104 // Provisioner-related transformations
105 &MissingProvisionerTransformer{Provisioners: b.Provisioners},
106 &ProvisionerTransformer{},
107
108 // Add root variables
109 &RootVariableTransformer{Module: b.Module},
110
111 // Add the outputs
112 &OutputTransformer{Module: b.Module},
113
114 // Add module variables
115 &ModuleVariableTransformer{Module: b.Module},
116
117 // Connect references so ordering is correct
118 &ReferenceTransformer{},
119
120 // Add the node to fix the state count boundaries
121 &CountBoundaryTransformer{},
122
123 // Target
124 &TargetsTransformer{Targets: b.Targets},
125
126 // Close opened plugin connections
127 &CloseProviderTransformer{},
128 &CloseProvisionerTransformer{},
129
130 // Single root
131 &RootTransformer{},
132 }
133
134 if !b.DisableReduce {
135 // Perform the transitive reduction to make our graph a bit
136 // more sane if possible (it usually is possible).
137 steps = append(steps, &TransitiveReductionTransformer{})
138 }
139
140 return steps
141}
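The concrete-factory closures above (concreteProvider, concreteResource) are how one set of transformers serves many operations: a transformer builds abstract nodes and defers to a caller-supplied function to pick the operation-specific wrapper. A sketch with hypothetical stand-in types:

package main

import "fmt"

type AbstractResource struct{ Addr string }

type Vertex interface{ Name() string }

// ApplyableResource wraps the abstract node for the apply operation.
type ApplyableResource struct{ *AbstractResource }

func (r *ApplyableResource) Name() string { return "apply:" + r.Addr }

type ConcreteFunc func(*AbstractResource) Vertex

// expand plays the role of a transformer: it creates abstract nodes and
// lets the caller-supplied factory choose the concrete type.
func expand(addrs []string, concrete ConcreteFunc) []Vertex {
	out := make([]Vertex, 0, len(addrs))
	for _, a := range addrs {
		out = append(out, concrete(&AbstractResource{Addr: a}))
	}
	return out
}

func main() {
	vs := expand([]string{"aws_instance.web"}, func(a *AbstractResource) Vertex {
		return &ApplyableResource{AbstractResource: a}
	})
	fmt.Println(vs[0].Name()) // apply:aws_instance.web
}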
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_destroy_plan.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_destroy_plan.go
new file mode 100644
index 0000000..014b348
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_destroy_plan.go
@@ -0,0 +1,67 @@
1package terraform
2
3import (
4 "github.com/hashicorp/terraform/config/module"
5 "github.com/hashicorp/terraform/dag"
6)
7
8// DestroyPlanGraphBuilder implements GraphBuilder and is responsible for
9// planning a pure-destroy.
10//
11// Planning a pure destroy operation is simple because we can ignore most
12// ordering configuration and simply reverse the state.
13type DestroyPlanGraphBuilder struct {
14 // Module is the root module for the graph to build.
15 Module *module.Tree
16
17 // State is the current state
18 State *State
19
20 // Targets are resources to target
21 Targets []string
22
23 // Validate will do structural validation of the graph.
24 Validate bool
25}
26
27// See GraphBuilder
28func (b *DestroyPlanGraphBuilder) Build(path []string) (*Graph, error) {
29 return (&BasicGraphBuilder{
30 Steps: b.Steps(),
31 Validate: b.Validate,
32 Name: "DestroyPlanGraphBuilder",
33 }).Build(path)
34}
35
36// See GraphBuilder
37func (b *DestroyPlanGraphBuilder) Steps() []GraphTransformer {
38 concreteResource := func(a *NodeAbstractResource) dag.Vertex {
39 return &NodePlanDestroyableResource{
40 NodeAbstractResource: a,
41 }
42 }
43
44 steps := []GraphTransformer{
45 // Creates all the nodes represented in the state.
46 &StateTransformer{
47 Concrete: concreteResource,
48 State: b.State,
49 },
50
51 // Attach the configuration to any resources
52 &AttachResourceConfigTransformer{Module: b.Module},
53
54 // Destruction ordering. We require this only so that
55 // targeting below will prune the correct things.
56 &DestroyEdgeTransformer{Module: b.Module, State: b.State},
57
58 // Target. Note we don't set "Destroy: true" here since we already
59 // created proper destroy ordering.
60 &TargetsTransformer{Targets: b.Targets},
61
62 // Single root
63 &RootTransformer{},
64 }
65
66 return steps
67}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_import.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_import.go
new file mode 100644
index 0000000..7070c59
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_import.go
@@ -0,0 +1,76 @@
1package terraform
2
3import (
4 "github.com/hashicorp/terraform/config/module"
5 "github.com/hashicorp/terraform/dag"
6)
7
8// ImportGraphBuilder implements GraphBuilder and is responsible for building
9// a graph for importing resources into Terraform. This is a much, much
10// simpler graph than a normal configuration graph.
11type ImportGraphBuilder struct {
12 // ImportTargets are the list of resources to import.
13 ImportTargets []*ImportTarget
14
15 // Module is the module to add to the graph. See ImportOpts.Module.
16 Module *module.Tree
17
18 // Providers is the list of providers supported.
19 Providers []string
20}
21
22// Build builds the graph according to the steps returned by Steps.
23func (b *ImportGraphBuilder) Build(path []string) (*Graph, error) {
24 return (&BasicGraphBuilder{
25 Steps: b.Steps(),
26 Validate: true,
27 Name: "ImportGraphBuilder",
28 }).Build(path)
29}
30
31// Steps returns the ordered list of GraphTransformers that must be executed
32// to build a complete graph.
33func (b *ImportGraphBuilder) Steps() []GraphTransformer {
34 // Get the module. If we don't have one, we just use an empty tree
35 // so that the transform still works but does nothing.
36 mod := b.Module
37 if mod == nil {
38 mod = module.NewEmptyTree()
39 }
40
41 // Custom factory for creating providers.
42 concreteProvider := func(a *NodeAbstractProvider) dag.Vertex {
43 return &NodeApplyableProvider{
44 NodeAbstractProvider: a,
45 }
46 }
47
48 steps := []GraphTransformer{
49 // Create all our resources from the configuration and state
50 &ConfigTransformer{Module: mod},
51
52 // Add the import steps
53 &ImportStateTransformer{Targets: b.ImportTargets},
54
55 // Provider-related transformations
56 &MissingProviderTransformer{Providers: b.Providers, Concrete: concreteProvider},
57 &ProviderTransformer{},
58 &DisableProviderTransformer{},
59 &ParentProviderTransformer{},
60 &AttachProviderConfigTransformer{Module: mod},
61
62 // This validates that the providers only depend on variables
63 &ImportProviderValidateTransformer{},
64
65 // Close opened plugin connections
66 &CloseProviderTransformer{},
67
68 // Single root
69 &RootTransformer{},
70
71 // Optimize
72 &TransitiveReductionTransformer{},
73 }
74
75 return steps
76}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_input.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_input.go
new file mode 100644
index 0000000..0df48cd
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_input.go
@@ -0,0 +1,27 @@
1package terraform
2
3import (
4 "github.com/hashicorp/terraform/dag"
5)
6
7// InputGraphBuilder creates the graph for the input operation.
8//
9// Unlike other graph builders, this is a function since it modifies the
10// PlanGraphBuilder it is based on. The PlanGraphBuilder passed in will be
11// modified and should not be used for any other operations.
12func InputGraphBuilder(p *PlanGraphBuilder) GraphBuilder {
13 // We're going to customize the concrete functions
14 p.CustomConcrete = true
15
16 // Set the provider to the normal provider. This will ask for input.
17 p.ConcreteProvider = func(a *NodeAbstractProvider) dag.Vertex {
18 return &NodeApplyableProvider{
19 NodeAbstractProvider: a,
20 }
21 }
22
23 // We purposely don't set any more concrete fields since the remainder
24 // should be no-ops.
25
26 return p
27}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_plan.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_plan.go
new file mode 100644
index 0000000..a6a3a90
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_plan.go
@@ -0,0 +1,164 @@
1package terraform
2
3import (
4 "sync"
5
6 "github.com/hashicorp/terraform/config/module"
7 "github.com/hashicorp/terraform/dag"
8)
9
10// PlanGraphBuilder implements GraphBuilder and is responsible for building
11// a graph for planning (creating a Terraform Diff).
12//
13// The primary difference between this graph and others:
14//
15// * Based on the config since it represents the target state
16//
17// * Ignores lifecycle options since no lifecycle events occur here. This
18// simplifies the graph significantly since complex transforms such as
19// create-before-destroy can be completely ignored.
20//
21type PlanGraphBuilder struct {
22 // Module is the root module for the graph to build.
23 Module *module.Tree
24
25 // State is the current state
26 State *State
27
28 // Providers is the list of providers supported.
29 Providers []string
30
31 // Provisioners is the list of provisioners supported.
32 Provisioners []string
33
34 // Targets are resources to target
35 Targets []string
36
37 // DisableReduce, if true, will not reduce the graph. Great for testing.
38 DisableReduce bool
39
40 // Validate will do structural validation of the graph.
41 Validate bool
42
43 // CustomConcrete can be set to customize the node types created
44 // for various parts of the plan. This is useful in order to customize
45 // the plan behavior.
46 CustomConcrete bool
47 ConcreteProvider ConcreteProviderNodeFunc
48 ConcreteResource ConcreteResourceNodeFunc
49 ConcreteResourceOrphan ConcreteResourceNodeFunc
50
51 once sync.Once
52}
53
54// See GraphBuilder
55func (b *PlanGraphBuilder) Build(path []string) (*Graph, error) {
56 return (&BasicGraphBuilder{
57 Steps: b.Steps(),
58 Validate: b.Validate,
59 Name: "PlanGraphBuilder",
60 }).Build(path)
61}
62
63// See GraphBuilder
64func (b *PlanGraphBuilder) Steps() []GraphTransformer {
65 b.once.Do(b.init)
66
67 steps := []GraphTransformer{
68 // Creates all the resources represented in the config
69 &ConfigTransformer{
70 Concrete: b.ConcreteResource,
71 Module: b.Module,
72 },
73
74 // Add the outputs
75 &OutputTransformer{Module: b.Module},
76
77 // Add orphan resources
78 &OrphanResourceTransformer{
79 Concrete: b.ConcreteResourceOrphan,
80 State: b.State,
81 Module: b.Module,
82 },
83
84 // Attach the configuration to any resources
85 &AttachResourceConfigTransformer{Module: b.Module},
86
87 // Attach the state
88 &AttachStateTransformer{State: b.State},
89
90 // Add root variables
91 &RootVariableTransformer{Module: b.Module},
92
93 // Create all the providers
94 &MissingProviderTransformer{Providers: b.Providers, Concrete: b.ConcreteProvider},
95 &ProviderTransformer{},
96 &DisableProviderTransformer{},
97 &ParentProviderTransformer{},
98 &AttachProviderConfigTransformer{Module: b.Module},
99
100 // Provisioner-related transformations. Only add these if requested.
101 GraphTransformIf(
102 func() bool { return b.Provisioners != nil },
103 GraphTransformMulti(
104 &MissingProvisionerTransformer{Provisioners: b.Provisioners},
105 &ProvisionerTransformer{},
106 ),
107 ),
108
109 // Add module variables
110 &ModuleVariableTransformer{Module: b.Module},
111
112 // Connect so that the references are ready for targeting. We'll
113 // have to connect again later for providers and so on.
114 &ReferenceTransformer{},
115
116 // Add the node to fix the state count boundaries
117 &CountBoundaryTransformer{},
118
119 // Target
120 &TargetsTransformer{Targets: b.Targets},
121
122 // Close opened plugin connections
123 &CloseProviderTransformer{},
124 &CloseProvisionerTransformer{},
125
126 // Single root
127 &RootTransformer{},
128 }
129
130 if !b.DisableReduce {
131 // Perform the transitive reduction to make our graph a bit
132 // more sane if possible (it usually is possible).
133 steps = append(steps, &TransitiveReductionTransformer{})
134 }
135
136 return steps
137}
138
139func (b *PlanGraphBuilder) init() {
140 // Do nothing if the user requests customizing the fields
141 if b.CustomConcrete {
142 return
143 }
144
145 b.ConcreteProvider = func(a *NodeAbstractProvider) dag.Vertex {
146 return &NodeApplyableProvider{
147 NodeAbstractProvider: a,
148 }
149 }
150
151 b.ConcreteResource = func(a *NodeAbstractResource) dag.Vertex {
152 return &NodePlannableResource{
153 NodeAbstractCountResource: &NodeAbstractCountResource{
154 NodeAbstractResource: a,
155 },
156 }
157 }
158
159 b.ConcreteResourceOrphan = func(a *NodeAbstractResource) dag.Vertex {
160 return &NodePlannableResourceOrphan{
161 NodeAbstractResource: a,
162 }
163 }
164}
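PlanGraphBuilder's init runs under sync.Once, so defaults are filled in lazily on the first Steps call and skipped entirely when the caller opted into custom concrete functions. A compact sketch of that guard:

package main

import (
	"fmt"
	"sync"
)

type Builder struct {
	Custom   bool
	Concrete func() string

	once sync.Once
}

func (b *Builder) Steps() []string {
	b.once.Do(b.init) // runs at most once, even across repeated Steps calls
	return []string{b.Concrete()}
}

func (b *Builder) init() {
	if b.Custom {
		return // the caller supplied its own concrete functions
	}
	b.Concrete = func() string { return "default-node" }
}

func main() {
	b := &Builder{}
	fmt.Println(b.Steps()) // [default-node]
}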
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_refresh.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_refresh.go
new file mode 100644
index 0000000..88ae338
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_refresh.go
@@ -0,0 +1,132 @@
1package terraform
2
3import (
4 "github.com/hashicorp/terraform/config"
5 "github.com/hashicorp/terraform/config/module"
6 "github.com/hashicorp/terraform/dag"
7)
8
9// RefreshGraphBuilder implements GraphBuilder and is responsible for building
10// a graph for refreshing (updating the Terraform state).
11//
12// The primary difference between this graph and others:
13//
14// * Based on the state since it represents the only resources that
15// need to be refreshed.
16//
17// * Ignores lifecycle options since no lifecycle events occur here. This
18// simplifies the graph significantly since complex transforms such as
19// create-before-destroy can be completely ignored.
20//
21type RefreshGraphBuilder struct {
22 // Module is the root module for the graph to build.
23 Module *module.Tree
24
25 // State is the current state
26 State *State
27
28 // Providers is the list of providers supported.
29 Providers []string
30
31 // Targets are resources to target
32 Targets []string
33
34 // DisableReduce, if true, will not reduce the graph. Great for testing.
35 DisableReduce bool
36
37 // Validate will do structural validation of the graph.
38 Validate bool
39}
40
41// See GraphBuilder
42func (b *RefreshGraphBuilder) Build(path []string) (*Graph, error) {
43 return (&BasicGraphBuilder{
44 Steps: b.Steps(),
45 Validate: b.Validate,
46 Name: "RefreshGraphBuilder",
47 }).Build(path)
48}
49
50// See GraphBuilder
51func (b *RefreshGraphBuilder) Steps() []GraphTransformer {
52 // Custom factory for creating providers.
53 concreteProvider := func(a *NodeAbstractProvider) dag.Vertex {
54 return &NodeApplyableProvider{
55 NodeAbstractProvider: a,
56 }
57 }
58
59 concreteResource := func(a *NodeAbstractResource) dag.Vertex {
60 return &NodeRefreshableResource{
61 NodeAbstractResource: a,
62 }
63 }
64
65 concreteDataResource := func(a *NodeAbstractResource) dag.Vertex {
66 return &NodeRefreshableDataResource{
67 NodeAbstractCountResource: &NodeAbstractCountResource{
68 NodeAbstractResource: a,
69 },
70 }
71 }
72
73 steps := []GraphTransformer{
74 // Creates all the resources represented in the state
75 &StateTransformer{
76 Concrete: concreteResource,
77 State: b.State,
78 },
79
80 // Creates all the data resources that aren't in the state
81 &ConfigTransformer{
82 Concrete: concreteDataResource,
83 Module: b.Module,
84 Unique: true,
85 ModeFilter: true,
86 Mode: config.DataResourceMode,
87 },
88
89 // Attach the state
90 &AttachStateTransformer{State: b.State},
91
92 // Attach the configuration to any resources
93 &AttachResourceConfigTransformer{Module: b.Module},
94
95 // Add root variables
96 &RootVariableTransformer{Module: b.Module},
97
98 // Create all the providers
99 &MissingProviderTransformer{Providers: b.Providers, Concrete: concreteProvider},
100 &ProviderTransformer{},
101 &DisableProviderTransformer{},
102 &ParentProviderTransformer{},
103 &AttachProviderConfigTransformer{Module: b.Module},
104
105 // Add the outputs
106 &OutputTransformer{Module: b.Module},
107
108 // Add module variables
109 &ModuleVariableTransformer{Module: b.Module},
110
111 // Connect so that the references are ready for targeting. We'll
112 // have to connect again later for providers and so on.
113 &ReferenceTransformer{},
114
115 // Target
116 &TargetsTransformer{Targets: b.Targets},
117
118 // Close opened plugin connections
119 &CloseProviderTransformer{},
120
121 // Single root
122 &RootTransformer{},
123 }
124
125 if !b.DisableReduce {
126 // Perform the transitive reduction to make our graph a bit
127 // more sane if possible (it usually is possible).
128 steps = append(steps, &TransitiveReductionTransformer{})
129 }
130
131 return steps
132}
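
A minimal sketch of driving this builder, assuming mod is an already-loaded *module.Tree and st is the prior state; the provider list and the []string{"root"} path literal are illustrative:

package demo

import (
    "github.com/hashicorp/terraform/config/module"
    "github.com/hashicorp/terraform/terraform"
)

// buildRefreshGraph builds the refresh graph for the root module.
func buildRefreshGraph(mod *module.Tree, st *terraform.State) (*terraform.Graph, error) {
    b := &terraform.RefreshGraphBuilder{
        Module:    mod,
        State:     st,
        Providers: []string{"aws"}, // hypothetical provider list
        Validate:  true,
    }
    return b.Build([]string{"root"})
}
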
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_validate.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_validate.go
new file mode 100644
index 0000000..645ec7b
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_validate.go
@@ -0,0 +1,36 @@
1package terraform
2
3import (
4 "github.com/hashicorp/terraform/dag"
5)
6
7// ValidateGraphBuilder creates the graph for the validate operation.
8//
9// ValidateGraphBuilder is based on the PlanGraphBuilder. We do this so that
10// we only have to validate what we'd normally plan anyway. The
11// PlanGraphBuilder given will be modified so it shouldn't be used for anything
12// else after calling this function.
13func ValidateGraphBuilder(p *PlanGraphBuilder) GraphBuilder {
14 // We're going to customize the concrete functions
15 p.CustomConcrete = true
16
17 // Set the provider to the normal provider. This will ask for input.
18 p.ConcreteProvider = func(a *NodeAbstractProvider) dag.Vertex {
19 return &NodeApplyableProvider{
20 NodeAbstractProvider: a,
21 }
22 }
23
24 p.ConcreteResource = func(a *NodeAbstractResource) dag.Vertex {
25 return &NodeValidatableResource{
26 NodeAbstractCountResource: &NodeAbstractCountResource{
27 NodeAbstractResource: a,
28 },
29 }
30 }
31
32 // We purposely don't set any other concrete types since they don't
33 // require validation.
34
35 return p
36}
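
A sketch of the intended call pattern, assuming the plan builder exposes the same Module/State/Providers fields as the builders above (the provider list is illustrative). Per the doc comment, the PlanGraphBuilder is consumed and must not be reused:

package demo

import (
    "github.com/hashicorp/terraform/config/module"
    "github.com/hashicorp/terraform/terraform"
)

// buildValidateGraph wraps a plan builder so only what would be
// planned gets validated.
func buildValidateGraph(mod *module.Tree, st *terraform.State) (*terraform.Graph, error) {
    p := &terraform.PlanGraphBuilder{
        Module:    mod,
        State:     st,
        Providers: []string{"aws"}, // hypothetical
        Validate:  true,
    }
    return terraform.ValidateGraphBuilder(p).Build([]string{"root"})
}
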
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_dot.go b/vendor/github.com/hashicorp/terraform/terraform/graph_dot.go
new file mode 100644
index 0000000..73e3821
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_dot.go
@@ -0,0 +1,9 @@
1package terraform
2
3import "github.com/hashicorp/terraform/dag"
4
5// GraphDot returns the DOT representation of the given Terraform
6// graph, suitable for rendering with Graphviz.
7func GraphDot(g *Graph, opts *dag.DotOpts) (string, error) {
8 return string(g.Dot(opts)), nil
9}
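
For example, a built graph can be rendered to DOT and piped into Graphviz; the Verbose field on dag.DotOpts is assumed here:

package demo

import (
    "fmt"

    "github.com/hashicorp/terraform/dag"
    "github.com/hashicorp/terraform/terraform"
)

// printDot prints g in DOT form (render with e.g. `dot -Tpng`).
func printDot(g *terraform.Graph) error {
    s, err := terraform.GraphDot(g, &dag.DotOpts{Verbose: true})
    if err != nil {
        return err
    }
    fmt.Println(s)
    return nil
}
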
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_interface_subgraph.go b/vendor/github.com/hashicorp/terraform/terraform/graph_interface_subgraph.go
new file mode 100644
index 0000000..2897eb5
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_interface_subgraph.go
@@ -0,0 +1,7 @@
1package terraform
2
3// GraphNodeSubPath says that a node is part of a graph with a
4// different path, and the context should be adjusted accordingly.
5type GraphNodeSubPath interface {
6 Path() []string
7}
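
A trivial implementation sketch: a hypothetical vertex that declares itself part of a "root.network" child module so the walker adjusts its context accordingly:

package demo

// networkNode is a hypothetical vertex living in a child module.
type networkNode struct{}

// Path implements GraphNodeSubPath.
func (n *networkNode) Path() []string {
    return []string{"root", "network"}
}
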
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_walk.go b/vendor/github.com/hashicorp/terraform/terraform/graph_walk.go
new file mode 100644
index 0000000..34ce6f6
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_walk.go
@@ -0,0 +1,60 @@
1package terraform
2
3import (
4 "github.com/hashicorp/terraform/dag"
5)
6
7// GraphWalker is an interface that can be implemented so that, when
8// used with Graph.Walk, the given callbacks are invoked on certain events.
9type GraphWalker interface {
10 EnterPath([]string) EvalContext
11 ExitPath([]string)
12 EnterVertex(dag.Vertex)
13 ExitVertex(dag.Vertex, error)
14 EnterEvalTree(dag.Vertex, EvalNode) EvalNode
15 ExitEvalTree(dag.Vertex, interface{}, error) error
16}
17
18// GraphWalkerPanicwrapper can be optionally implemented to catch panics
19// that occur while walking the graph. This is not generally recommended
20// since panics should crash Terraform and result in a bug report. However,
21// this is particularly useful for situations like the shadow graph where
22// you don't ever want to cause a panic.
23type GraphWalkerPanicwrapper interface {
24 GraphWalker
25
26 // Panic is called when a panic occurs. This halts the panic from
27 // propagating, so if the walker still wants to crash it should panic
28 // again. This is called from within a defer so runtime/debug.Stack can
29 // be used to get the stack trace of the panic.
30 Panic(dag.Vertex, interface{})
31}
32
33// GraphWalkerPanicwrap wraps an existing GraphWalker to catch and swallow
34// its panics. The panics aren't lost, since they are still
35// returned as errors as part of a graph walk.
36func GraphWalkerPanicwrap(w GraphWalker) GraphWalkerPanicwrapper {
37 return &graphWalkerPanicwrapper{
38 GraphWalker: w,
39 }
40}
41
42type graphWalkerPanicwrapper struct {
43 GraphWalker
44}
45
46func (graphWalkerPanicwrapper) Panic(dag.Vertex, interface{}) {}
47
48// NullGraphWalker is a GraphWalker implementation that does nothing.
49// This can be embedded within other GraphWalker implementations for easily
50// implementing all the required functions.
51type NullGraphWalker struct{}
52
53func (NullGraphWalker) EnterPath([]string) EvalContext { return new(MockEvalContext) }
54func (NullGraphWalker) ExitPath([]string) {}
55func (NullGraphWalker) EnterVertex(dag.Vertex) {}
56func (NullGraphWalker) ExitVertex(dag.Vertex, error) {}
57func (NullGraphWalker) EnterEvalTree(v dag.Vertex, n EvalNode) EvalNode { return n }
58func (NullGraphWalker) ExitEvalTree(dag.Vertex, interface{}, error) error {
59 return nil
60}
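
A sketch of a custom walker: embedding NullGraphWalker means only the callbacks of interest need overrides, and GraphWalkerPanicwrap turns panics during the walk into errors:

package demo

import (
    "log"

    "github.com/hashicorp/terraform/dag"
    "github.com/hashicorp/terraform/terraform"
)

// loggingWalker logs vertex entry and exit; every other callback
// falls through to NullGraphWalker's no-ops.
type loggingWalker struct {
    terraform.NullGraphWalker
}

func (loggingWalker) EnterVertex(v dag.Vertex) {
    log.Printf("enter: %s", dag.VertexName(v))
}

func (loggingWalker) ExitVertex(v dag.Vertex, err error) {
    log.Printf("exit: %s (err=%v)", dag.VertexName(v), err)
}

// wrapped swallows panics and surfaces them as walk errors instead.
var wrapped terraform.GraphWalkerPanicwrapper = terraform.GraphWalkerPanicwrap(loggingWalker{})
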
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_walk_context.go b/vendor/github.com/hashicorp/terraform/terraform/graph_walk_context.go
new file mode 100644
index 0000000..e63b460
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_walk_context.go
@@ -0,0 +1,157 @@
1package terraform
2
3import (
4 "context"
5 "fmt"
6 "log"
7 "sync"
8
9 "github.com/hashicorp/errwrap"
10 "github.com/hashicorp/terraform/dag"
11)
12
13// ContextGraphWalker is the GraphWalker implementation used with the
14// Context struct to walk and evaluate the graph.
15type ContextGraphWalker struct {
16 NullGraphWalker
17
18 // Configurable values
19 Context *Context
20 Operation walkOperation
21 StopContext context.Context
22
23 // Outputs, do not set these. Do not read these while the graph
24 // is being walked.
25 ValidationWarnings []string
26 ValidationErrors []error
27
28 errorLock sync.Mutex
29 once sync.Once
30 contexts map[string]*BuiltinEvalContext
31 contextLock sync.Mutex
32 interpolaterVars map[string]map[string]interface{}
33 interpolaterVarLock sync.Mutex
34 providerCache map[string]ResourceProvider
35 providerConfigCache map[string]*ResourceConfig
36 providerLock sync.Mutex
37 provisionerCache map[string]ResourceProvisioner
38 provisionerLock sync.Mutex
39}
40
41func (w *ContextGraphWalker) EnterPath(path []string) EvalContext {
42 w.once.Do(w.init)
43
44 w.contextLock.Lock()
45 defer w.contextLock.Unlock()
46
47 // If we already have a context for this path cached, use that
48 key := PathCacheKey(path)
49 if ctx, ok := w.contexts[key]; ok {
50 return ctx
51 }
52
53 // Set up the variables for this interpolater
54 variables := make(map[string]interface{})
55 if len(path) <= 1 {
56 for k, v := range w.Context.variables {
57 variables[k] = v
58 }
59 }
60 w.interpolaterVarLock.Lock()
61 if m, ok := w.interpolaterVars[key]; ok {
62 for k, v := range m {
63 variables[k] = v
64 }
65 }
66 w.interpolaterVars[key] = variables
67 w.interpolaterVarLock.Unlock()
68
69 ctx := &BuiltinEvalContext{
70 StopContext: w.StopContext,
71 PathValue: path,
72 Hooks: w.Context.hooks,
73 InputValue: w.Context.uiInput,
74 Components: w.Context.components,
75 ProviderCache: w.providerCache,
76 ProviderConfigCache: w.providerConfigCache,
77 ProviderInputConfig: w.Context.providerInputConfig,
78 ProviderLock: &w.providerLock,
79 ProvisionerCache: w.provisionerCache,
80 ProvisionerLock: &w.provisionerLock,
81 DiffValue: w.Context.diff,
82 DiffLock: &w.Context.diffLock,
83 StateValue: w.Context.state,
84 StateLock: &w.Context.stateLock,
85 Interpolater: &Interpolater{
86 Operation: w.Operation,
87 Meta: w.Context.meta,
88 Module: w.Context.module,
89 State: w.Context.state,
90 StateLock: &w.Context.stateLock,
91 VariableValues: variables,
92 VariableValuesLock: &w.interpolaterVarLock,
93 },
94 InterpolaterVars: w.interpolaterVars,
95 InterpolaterVarLock: &w.interpolaterVarLock,
96 }
97
98 w.contexts[key] = ctx
99 return ctx
100}
101
102func (w *ContextGraphWalker) EnterEvalTree(v dag.Vertex, n EvalNode) EvalNode {
103 log.Printf("[TRACE] [%s] Entering eval tree: %s",
104 w.Operation, dag.VertexName(v))
105
106 // Acquire a lock on the semaphore
107 w.Context.parallelSem.Acquire()
108
109 // We want to filter the evaluation tree to only include operations
110 // that belong in this operation.
111 return EvalFilter(n, EvalNodeFilterOp(w.Operation))
112}
113
114func (w *ContextGraphWalker) ExitEvalTree(
115 v dag.Vertex, output interface{}, err error) error {
116 log.Printf("[TRACE] [%s] Exiting eval tree: %s",
117 w.Operation, dag.VertexName(v))
118
119 // Release the semaphore
120 w.Context.parallelSem.Release()
121
122 if err == nil {
123 return nil
124 }
125
126 // Acquire the lock, since everything from here on requires it.
127 w.errorLock.Lock()
128 defer w.errorLock.Unlock()
129
130 // Try to get a validation error out of it. If it's not a validation
131 // error, then just record the normal error.
132 verr, ok := err.(*EvalValidateError)
133 if !ok {
134 return err
135 }
136
137 for _, msg := range verr.Warnings {
138 w.ValidationWarnings = append(
139 w.ValidationWarnings,
140 fmt.Sprintf("%s: %s", dag.VertexName(v), msg))
141 }
142 for _, e := range verr.Errors {
143 w.ValidationErrors = append(
144 w.ValidationErrors,
145 errwrap.Wrapf(fmt.Sprintf("%s: {{err}}", dag.VertexName(v)), e))
146 }
147
148 return nil
149}
150
151func (w *ContextGraphWalker) init() {
152 w.contexts = make(map[string]*BuiltinEvalContext, 5)
153 w.providerCache = make(map[string]ResourceProvider, 5)
154 w.providerConfigCache = make(map[string]*ResourceConfig, 5)
155 w.provisionerCache = make(map[string]ResourceProvisioner, 5)
156 w.interpolaterVars = make(map[string]map[string]interface{}, 5)
157}
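
The per-path caching in EnterPath is a reusable pattern: lazy initialization via sync.Once, then one context per canonicalized path under a mutex. A standalone sketch (not the Terraform API; evalCtx and the dotted key stand in for BuiltinEvalContext and PathCacheKey):

package demo

import (
    "strings"
    "sync"
)

type evalCtx struct{ path []string } // stand-in for *BuiltinEvalContext

type pathCache struct {
    once sync.Once
    mu   sync.Mutex
    m    map[string]*evalCtx
}

// enterPath returns the cached context for path, creating it on first use.
func (c *pathCache) enterPath(path []string) *evalCtx {
    c.once.Do(func() { c.m = make(map[string]*evalCtx) })

    c.mu.Lock()
    defer c.mu.Unlock()

    key := strings.Join(path, ".") // stand-in for PathCacheKey
    if ctx, ok := c.m[key]; ok {
        return ctx
    }
    ctx := &evalCtx{path: path}
    c.m[key] = ctx
    return ctx
}
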
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_walk_operation.go b/vendor/github.com/hashicorp/terraform/terraform/graph_walk_operation.go
new file mode 100644
index 0000000..3fb3748
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_walk_operation.go
@@ -0,0 +1,18 @@
1package terraform
2
3//go:generate stringer -type=walkOperation graph_walk_operation.go
4
5// walkOperation is an enum which tells the walkContext what to do.
6type walkOperation byte
7
8const (
9 walkInvalid walkOperation = iota
10 walkInput
11 walkApply
12 walkPlan
13 walkPlanDestroy
14 walkRefresh
15 walkValidate
16 walkDestroy
17 walkImport
18)
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graphtype_string.go b/vendor/github.com/hashicorp/terraform/terraform/graphtype_string.go
new file mode 100644
index 0000000..e97b485
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/graphtype_string.go
@@ -0,0 +1,16 @@
1// Code generated by "stringer -type=GraphType context_graph_type.go"; DO NOT EDIT.
2
3package terraform
4
5import "fmt"
6
7const _GraphType_name = "GraphTypeInvalidGraphTypeLegacyGraphTypeRefreshGraphTypePlanGraphTypePlanDestroyGraphTypeApplyGraphTypeInputGraphTypeValidate"
8
9var _GraphType_index = [...]uint8{0, 16, 31, 47, 60, 80, 94, 108, 125}
10
11func (i GraphType) String() string {
12 if i >= GraphType(len(_GraphType_index)-1) {
13 return fmt.Sprintf("GraphType(%d)", i)
14 }
15 return _GraphType_name[_GraphType_index[i]:_GraphType_index[i+1]]
16}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/hook.go b/vendor/github.com/hashicorp/terraform/terraform/hook.go
new file mode 100644
index 0000000..ab11e8e
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/hook.go
@@ -0,0 +1,137 @@
1package terraform
2
3// HookAction is an enum of actions that can be taken as a result of a hook
4// callback. This allows you to modify the behavior of Terraform at runtime.
5type HookAction byte
6
7const (
8 // HookActionContinue continues with processing as usual.
9 HookActionContinue HookAction = iota
10
11 // HookActionHalt halts immediately: no more hooks are processed
12 // and the action that Terraform was about to take is cancelled.
13 HookActionHalt
14)
15
16// Hook is the interface that must be implemented to hook into various
17// parts of Terraform, allowing you to inspect or change behavior at runtime.
18//
19// There are MANY hook points into Terraform. If you only want to implement
20// some hook points, but not all (which is the likely case), then embed the
21// NilHook into your struct, which implements all of the interface but does
22// nothing. Then, override only the functions you want to implement.
23type Hook interface {
24 // PreApply and PostApply are called before and after a single
25 // resource is applied. The error argument in PostApply is the
26 // error, if any, that was returned from the provider Apply call itself.
27 PreApply(*InstanceInfo, *InstanceState, *InstanceDiff) (HookAction, error)
28 PostApply(*InstanceInfo, *InstanceState, error) (HookAction, error)
29
30 // PreDiff and PostDiff are called before and after a single
31 // resource is diffed.
32 PreDiff(*InstanceInfo, *InstanceState) (HookAction, error)
33 PostDiff(*InstanceInfo, *InstanceDiff) (HookAction, error)
34
35 // Provisioning hooks
36 //
37 // All should be self-explanatory. ProvisionOutput is called with
38 // output sent back by the provisioners. This will be called multiple
39 // times as output comes in, but each call should represent a line of
40 // output. The ProvisionOutput method cannot control whether the
41 // hook continues running.
42 PreProvisionResource(*InstanceInfo, *InstanceState) (HookAction, error)
43 PostProvisionResource(*InstanceInfo, *InstanceState) (HookAction, error)
44 PreProvision(*InstanceInfo, string) (HookAction, error)
45 PostProvision(*InstanceInfo, string, error) (HookAction, error)
46 ProvisionOutput(*InstanceInfo, string, string)
47
48 // PreRefresh and PostRefresh are called before and after a single
49 // resource state is refreshed, respectively.
50 PreRefresh(*InstanceInfo, *InstanceState) (HookAction, error)
51 PostRefresh(*InstanceInfo, *InstanceState) (HookAction, error)
52
53 // PostStateUpdate is called after the state is updated.
54 PostStateUpdate(*State) (HookAction, error)
55
56 // PreImportState and PostImportState are called before and after
57 // a single resource's state is imported.
58 PreImportState(*InstanceInfo, string) (HookAction, error)
59 PostImportState(*InstanceInfo, []*InstanceState) (HookAction, error)
60}
61
62// NilHook is a Hook implementation that does nothing. It exists only to
63// simplify implementing hooks. You can embed this into your Hook implementation
64// and only implement the functions you are interested in.
65type NilHook struct{}
66
67func (*NilHook) PreApply(*InstanceInfo, *InstanceState, *InstanceDiff) (HookAction, error) {
68 return HookActionContinue, nil
69}
70
71func (*NilHook) PostApply(*InstanceInfo, *InstanceState, error) (HookAction, error) {
72 return HookActionContinue, nil
73}
74
75func (*NilHook) PreDiff(*InstanceInfo, *InstanceState) (HookAction, error) {
76 return HookActionContinue, nil
77}
78
79func (*NilHook) PostDiff(*InstanceInfo, *InstanceDiff) (HookAction, error) {
80 return HookActionContinue, nil
81}
82
83func (*NilHook) PreProvisionResource(*InstanceInfo, *InstanceState) (HookAction, error) {
84 return HookActionContinue, nil
85}
86
87func (*NilHook) PostProvisionResource(*InstanceInfo, *InstanceState) (HookAction, error) {
88 return HookActionContinue, nil
89}
90
91func (*NilHook) PreProvision(*InstanceInfo, string) (HookAction, error) {
92 return HookActionContinue, nil
93}
94
95func (*NilHook) PostProvision(*InstanceInfo, string, error) (HookAction, error) {
96 return HookActionContinue, nil
97}
98
99func (*NilHook) ProvisionOutput(
100 *InstanceInfo, string, string) {
101}
102
103func (*NilHook) PreRefresh(*InstanceInfo, *InstanceState) (HookAction, error) {
104 return HookActionContinue, nil
105}
106
107func (*NilHook) PostRefresh(*InstanceInfo, *InstanceState) (HookAction, error) {
108 return HookActionContinue, nil
109}
110
111func (*NilHook) PreImportState(*InstanceInfo, string) (HookAction, error) {
112 return HookActionContinue, nil
113}
114
115func (*NilHook) PostImportState(*InstanceInfo, []*InstanceState) (HookAction, error) {
116 return HookActionContinue, nil
117}
118
119func (*NilHook) PostStateUpdate(*State) (HookAction, error) {
120 return HookActionContinue, nil
121}
122
123// handleHook turns hook actions into panics. This lets you use the
124// panic/recover mechanism in Go as a flow control mechanism for hook
125// actions.
126func handleHook(a HookAction, err error) {
127 if err != nil {
128 // TODO: handle errors
129 }
130
131 switch a {
132 case HookActionContinue:
133 return
134 case HookActionHalt:
135 panic(HookActionHalt)
136 }
137}
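
A sketch of a custom hook following the embedding advice above: only PreApply is overridden, everything else inherits NilHook's HookActionContinue:

package demo

import (
    "log"

    "github.com/hashicorp/terraform/terraform"
)

// applyLogger logs each resource about to be applied.
type applyLogger struct {
    terraform.NilHook
}

func (h *applyLogger) PreApply(
    i *terraform.InstanceInfo,
    s *terraform.InstanceState,
    d *terraform.InstanceDiff) (terraform.HookAction, error) {
    log.Printf("applying %s", i.Id)
    return terraform.HookActionContinue, nil
}

// Compile-time check that applyLogger satisfies Hook.
var _ terraform.Hook = (*applyLogger)(nil)
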
diff --git a/vendor/github.com/hashicorp/terraform/terraform/hook_mock.go b/vendor/github.com/hashicorp/terraform/terraform/hook_mock.go
new file mode 100644
index 0000000..0e46400
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/hook_mock.go
@@ -0,0 +1,245 @@
1package terraform
2
3import "sync"
4
5// MockHook is an implementation of Hook that can be used for tests.
6// It records all of its function calls.
7type MockHook struct {
8 sync.Mutex
9
10 PreApplyCalled bool
11 PreApplyInfo *InstanceInfo
12 PreApplyDiff *InstanceDiff
13 PreApplyState *InstanceState
14 PreApplyReturn HookAction
15 PreApplyError error
16
17 PostApplyCalled bool
18 PostApplyInfo *InstanceInfo
19 PostApplyState *InstanceState
20 PostApplyError error
21 PostApplyReturn HookAction
22 PostApplyReturnError error
23 PostApplyFn func(*InstanceInfo, *InstanceState, error) (HookAction, error)
24
25 PreDiffCalled bool
26 PreDiffInfo *InstanceInfo
27 PreDiffState *InstanceState
28 PreDiffReturn HookAction
29 PreDiffError error
30
31 PostDiffCalled bool
32 PostDiffInfo *InstanceInfo
33 PostDiffDiff *InstanceDiff
34 PostDiffReturn HookAction
35 PostDiffError error
36
37 PreProvisionResourceCalled bool
38 PreProvisionResourceInfo *InstanceInfo
39 PreProvisionInstanceState *InstanceState
40 PreProvisionResourceReturn HookAction
41 PreProvisionResourceError error
42
43 PostProvisionResourceCalled bool
44 PostProvisionResourceInfo *InstanceInfo
45 PostProvisionInstanceState *InstanceState
46 PostProvisionResourceReturn HookAction
47 PostProvisionResourceError error
48
49 PreProvisionCalled bool
50 PreProvisionInfo *InstanceInfo
51 PreProvisionProvisionerId string
52 PreProvisionReturn HookAction
53 PreProvisionError error
54
55 PostProvisionCalled bool
56 PostProvisionInfo *InstanceInfo
57 PostProvisionProvisionerId string
58 PostProvisionErrorArg error
59 PostProvisionReturn HookAction
60 PostProvisionError error
61
62 ProvisionOutputCalled bool
63 ProvisionOutputInfo *InstanceInfo
64 ProvisionOutputProvisionerId string
65 ProvisionOutputMessage string
66
67 PostRefreshCalled bool
68 PostRefreshInfo *InstanceInfo
69 PostRefreshState *InstanceState
70 PostRefreshReturn HookAction
71 PostRefreshError error
72
73 PreRefreshCalled bool
74 PreRefreshInfo *InstanceInfo
75 PreRefreshState *InstanceState
76 PreRefreshReturn HookAction
77 PreRefreshError error
78
79 PreImportStateCalled bool
80 PreImportStateInfo *InstanceInfo
81 PreImportStateId string
82 PreImportStateReturn HookAction
83 PreImportStateError error
84
85 PostImportStateCalled bool
86 PostImportStateInfo *InstanceInfo
87 PostImportStateState []*InstanceState
88 PostImportStateReturn HookAction
89 PostImportStateError error
90
91 PostStateUpdateCalled bool
92 PostStateUpdateState *State
93 PostStateUpdateReturn HookAction
94 PostStateUpdateError error
95}
96
97func (h *MockHook) PreApply(n *InstanceInfo, s *InstanceState, d *InstanceDiff) (HookAction, error) {
98 h.Lock()
99 defer h.Unlock()
100
101 h.PreApplyCalled = true
102 h.PreApplyInfo = n
103 h.PreApplyDiff = d
104 h.PreApplyState = s
105 return h.PreApplyReturn, h.PreApplyError
106}
107
108func (h *MockHook) PostApply(n *InstanceInfo, s *InstanceState, e error) (HookAction, error) {
109 h.Lock()
110 defer h.Unlock()
111
112 h.PostApplyCalled = true
113 h.PostApplyInfo = n
114 h.PostApplyState = s
115 h.PostApplyError = e
116
117 if h.PostApplyFn != nil {
118 return h.PostApplyFn(n, s, e)
119 }
120
121 return h.PostApplyReturn, h.PostApplyReturnError
122}
123
124func (h *MockHook) PreDiff(n *InstanceInfo, s *InstanceState) (HookAction, error) {
125 h.Lock()
126 defer h.Unlock()
127
128 h.PreDiffCalled = true
129 h.PreDiffInfo = n
130 h.PreDiffState = s
131 return h.PreDiffReturn, h.PreDiffError
132}
133
134func (h *MockHook) PostDiff(n *InstanceInfo, d *InstanceDiff) (HookAction, error) {
135 h.Lock()
136 defer h.Unlock()
137
138 h.PostDiffCalled = true
139 h.PostDiffInfo = n
140 h.PostDiffDiff = d
141 return h.PostDiffReturn, h.PostDiffError
142}
143
144func (h *MockHook) PreProvisionResource(n *InstanceInfo, s *InstanceState) (HookAction, error) {
145 h.Lock()
146 defer h.Unlock()
147
148 h.PreProvisionResourceCalled = true
149 h.PreProvisionResourceInfo = n
150 h.PreProvisionInstanceState = s
151 return h.PreProvisionResourceReturn, h.PreProvisionResourceError
152}
153
154func (h *MockHook) PostProvisionResource(n *InstanceInfo, s *InstanceState) (HookAction, error) {
155 h.Lock()
156 defer h.Unlock()
157
158 h.PostProvisionResourceCalled = true
159 h.PostProvisionResourceInfo = n
160 h.PostProvisionInstanceState = s
161 return h.PostProvisionResourceReturn, h.PostProvisionResourceError
162}
163
164func (h *MockHook) PreProvision(n *InstanceInfo, provId string) (HookAction, error) {
165 h.Lock()
166 defer h.Unlock()
167
168 h.PreProvisionCalled = true
169 h.PreProvisionInfo = n
170 h.PreProvisionProvisionerId = provId
171 return h.PreProvisionReturn, h.PreProvisionError
172}
173
174func (h *MockHook) PostProvision(n *InstanceInfo, provId string, err error) (HookAction, error) {
175 h.Lock()
176 defer h.Unlock()
177
178 h.PostProvisionCalled = true
179 h.PostProvisionInfo = n
180 h.PostProvisionProvisionerId = provId
181 h.PostProvisionErrorArg = err
182 return h.PostProvisionReturn, h.PostProvisionError
183}
184
185func (h *MockHook) ProvisionOutput(
186 n *InstanceInfo,
187 provId string,
188 msg string) {
189 h.Lock()
190 defer h.Unlock()
191
192 h.ProvisionOutputCalled = true
193 h.ProvisionOutputInfo = n
194 h.ProvisionOutputProvisionerId = provId
195 h.ProvisionOutputMessage = msg
196}
197
198func (h *MockHook) PreRefresh(n *InstanceInfo, s *InstanceState) (HookAction, error) {
199 h.Lock()
200 defer h.Unlock()
201
202 h.PreRefreshCalled = true
203 h.PreRefreshInfo = n
204 h.PreRefreshState = s
205 return h.PreRefreshReturn, h.PreRefreshError
206}
207
208func (h *MockHook) PostRefresh(n *InstanceInfo, s *InstanceState) (HookAction, error) {
209 h.Lock()
210 defer h.Unlock()
211
212 h.PostRefreshCalled = true
213 h.PostRefreshInfo = n
214 h.PostRefreshState = s
215 return h.PostRefreshReturn, h.PostRefreshError
216}
217
218func (h *MockHook) PreImportState(info *InstanceInfo, id string) (HookAction, error) {
219 h.Lock()
220 defer h.Unlock()
221
222 h.PreImportStateCalled = true
223 h.PreImportStateInfo = info
224 h.PreImportStateId = id
225 return h.PreImportStateReturn, h.PreImportStateError
226}
227
228func (h *MockHook) PostImportState(info *InstanceInfo, s []*InstanceState) (HookAction, error) {
229 h.Lock()
230 defer h.Unlock()
231
232 h.PostImportStateCalled = true
233 h.PostImportStateInfo = info
234 h.PostImportStateState = s
235 return h.PostImportStateReturn, h.PostImportStateError
236}
237
238func (h *MockHook) PostStateUpdate(s *State) (HookAction, error) {
239 h.Lock()
240 defer h.Unlock()
241
242 h.PostStateUpdateCalled = true
243 h.PostStateUpdateState = s
244 return h.PostStateUpdateReturn, h.PostStateUpdateError
245}
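
Typical test usage, sketched: configure the canned return, invoke the hook, then assert on the recorded call (the instance id is illustrative):

package demo

import (
    "testing"

    "github.com/hashicorp/terraform/terraform"
)

func TestHookRecordsPreApply(t *testing.T) {
    h := new(terraform.MockHook)
    h.PreApplyReturn = terraform.HookActionContinue

    info := &terraform.InstanceInfo{Id: "aws_instance.example"} // hypothetical
    action, err := h.PreApply(info, nil, nil)

    if err != nil || action != terraform.HookActionContinue {
        t.Fatalf("unexpected result: %v, %v", action, err)
    }
    if !h.PreApplyCalled || h.PreApplyInfo != info {
        t.Fatal("PreApply call was not recorded")
    }
}
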
diff --git a/vendor/github.com/hashicorp/terraform/terraform/hook_stop.go b/vendor/github.com/hashicorp/terraform/terraform/hook_stop.go
new file mode 100644
index 0000000..104d009
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/hook_stop.go
@@ -0,0 +1,87 @@
1package terraform
2
3import (
4 "sync/atomic"
5)
6
7// stopHook is a private Hook implementation that Terraform uses to
8// signal when to stop or cancel actions.
9type stopHook struct {
10 stop uint32
11}
12
13func (h *stopHook) PreApply(*InstanceInfo, *InstanceState, *InstanceDiff) (HookAction, error) {
14 return h.hook()
15}
16
17func (h *stopHook) PostApply(*InstanceInfo, *InstanceState, error) (HookAction, error) {
18 return h.hook()
19}
20
21func (h *stopHook) PreDiff(*InstanceInfo, *InstanceState) (HookAction, error) {
22 return h.hook()
23}
24
25func (h *stopHook) PostDiff(*InstanceInfo, *InstanceDiff) (HookAction, error) {
26 return h.hook()
27}
28
29func (h *stopHook) PreProvisionResource(*InstanceInfo, *InstanceState) (HookAction, error) {
30 return h.hook()
31}
32
33func (h *stopHook) PostProvisionResource(*InstanceInfo, *InstanceState) (HookAction, error) {
34 return h.hook()
35}
36
37func (h *stopHook) PreProvision(*InstanceInfo, string) (HookAction, error) {
38 return h.hook()
39}
40
41func (h *stopHook) PostProvision(*InstanceInfo, string, error) (HookAction, error) {
42 return h.hook()
43}
44
45func (h *stopHook) ProvisionOutput(*InstanceInfo, string, string) {
46}
47
48func (h *stopHook) PreRefresh(*InstanceInfo, *InstanceState) (HookAction, error) {
49 return h.hook()
50}
51
52func (h *stopHook) PostRefresh(*InstanceInfo, *InstanceState) (HookAction, error) {
53 return h.hook()
54}
55
56func (h *stopHook) PreImportState(*InstanceInfo, string) (HookAction, error) {
57 return h.hook()
58}
59
60func (h *stopHook) PostImportState(*InstanceInfo, []*InstanceState) (HookAction, error) {
61 return h.hook()
62}
63
64func (h *stopHook) PostStateUpdate(*State) (HookAction, error) {
65 return h.hook()
66}
67
68func (h *stopHook) hook() (HookAction, error) {
69 if h.Stopped() {
70 return HookActionHalt, nil
71 }
72
73 return HookActionContinue, nil
74}
75
76// Reset resets the stop flag so the hook can be reused.
77func (h *stopHook) Reset() {
78 atomic.StoreUint32(&h.stop, 0)
79}
80
81func (h *stopHook) Stop() {
82 atomic.StoreUint32(&h.stop, 1)
83}
84
85func (h *stopHook) Stopped() bool {
86 return atomic.LoadUint32(&h.stop) == 1
87}
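
The flag itself is a small reusable pattern: atomic store/load on a uint32 lets many goroutines poll for cancellation without a mutex. A standalone sketch:

package main

import (
    "fmt"
    "sync/atomic"
)

// stopFlag mirrors stopHook's mechanism.
type stopFlag struct{ stop uint32 }

func (f *stopFlag) Stop()         { atomic.StoreUint32(&f.stop, 1) }
func (f *stopFlag) Reset()        { atomic.StoreUint32(&f.stop, 0) }
func (f *stopFlag) Stopped() bool { return atomic.LoadUint32(&f.stop) == 1 }

func main() {
    var f stopFlag
    f.Stop()
    fmt.Println(f.Stopped()) // true
}
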
diff --git a/vendor/github.com/hashicorp/terraform/terraform/instancetype.go b/vendor/github.com/hashicorp/terraform/terraform/instancetype.go
new file mode 100644
index 0000000..0895971
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/instancetype.go
@@ -0,0 +1,13 @@
1package terraform
2
3//go:generate stringer -type=InstanceType instancetype.go
4
5// InstanceType is an enum of the various types of instances stored in the State
6type InstanceType int
7
8const (
9 TypeInvalid InstanceType = iota
10 TypePrimary
11 TypeTainted
12 TypeDeposed
13)
diff --git a/vendor/github.com/hashicorp/terraform/terraform/instancetype_string.go b/vendor/github.com/hashicorp/terraform/terraform/instancetype_string.go
new file mode 100644
index 0000000..f69267c
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/instancetype_string.go
@@ -0,0 +1,16 @@
1// Code generated by "stringer -type=InstanceType instancetype.go"; DO NOT EDIT.
2
3package terraform
4
5import "fmt"
6
7const _InstanceType_name = "TypeInvalidTypePrimaryTypeTaintedTypeDeposed"
8
9var _InstanceType_index = [...]uint8{0, 11, 22, 33, 44}
10
11func (i InstanceType) String() string {
12 if i < 0 || i >= InstanceType(len(_InstanceType_index)-1) {
13 return fmt.Sprintf("InstanceType(%d)", i)
14 }
15 return _InstanceType_name[_InstanceType_index[i]:_InstanceType_index[i+1]]
16}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/interpolate.go b/vendor/github.com/hashicorp/terraform/terraform/interpolate.go
new file mode 100644
index 0000000..19dcf21
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/interpolate.go
@@ -0,0 +1,782 @@
1package terraform
2
3import (
4 "fmt"
5 "log"
6 "os"
7 "strconv"
8 "strings"
9 "sync"
10
11 "github.com/hashicorp/hil"
12 "github.com/hashicorp/hil/ast"
13 "github.com/hashicorp/terraform/config"
14 "github.com/hashicorp/terraform/config/module"
15 "github.com/hashicorp/terraform/flatmap"
16)
17
18const (
19 // VarEnvPrefix is the prefix of variables that are read from
20 // the environment to set variables here.
21 VarEnvPrefix = "TF_VAR_"
22)
23
24// Interpolater is the structure responsible for determining the values
25// for interpolations such as `aws_instance.foo.bar`.
26type Interpolater struct {
27 Operation walkOperation
28 Meta *ContextMeta
29 Module *module.Tree
30 State *State
31 StateLock *sync.RWMutex
32 VariableValues map[string]interface{}
33 VariableValuesLock *sync.Mutex
34}
35
36// InterpolationScope is the current scope of execution. This is required
37// since some variables which are interpolated are dependent on what we're
38// operating on and where we are.
39type InterpolationScope struct {
40 Path []string
41 Resource *Resource
42}
43
44// Values returns the values for all the variables in the given map.
45func (i *Interpolater) Values(
46 scope *InterpolationScope,
47 vars map[string]config.InterpolatedVariable) (map[string]ast.Variable, error) {
48 if scope == nil {
49 scope = &InterpolationScope{}
50 }
51
52 result := make(map[string]ast.Variable, len(vars))
53
54 // Copy the default variables
55 if i.Module != nil && scope != nil {
56 mod := i.Module
57 if len(scope.Path) > 1 {
58 mod = i.Module.Child(scope.Path[1:])
59 }
60 for _, v := range mod.Config().Variables {
61 // Set default variables
62 if v.Default == nil {
63 continue
64 }
65
66 n := fmt.Sprintf("var.%s", v.Name)
67 variable, err := hil.InterfaceToVariable(v.Default)
68 if err != nil {
69 return nil, fmt.Errorf("invalid default map value for %s: %v", v.Name, v.Default)
70 }
71
72 result[n] = variable
73 }
74 }
75
76 for n, rawV := range vars {
77 var err error
78 switch v := rawV.(type) {
79 case *config.CountVariable:
80 err = i.valueCountVar(scope, n, v, result)
81 case *config.ModuleVariable:
82 err = i.valueModuleVar(scope, n, v, result)
83 case *config.PathVariable:
84 err = i.valuePathVar(scope, n, v, result)
85 case *config.ResourceVariable:
86 err = i.valueResourceVar(scope, n, v, result)
87 case *config.SelfVariable:
88 err = i.valueSelfVar(scope, n, v, result)
89 case *config.SimpleVariable:
90 err = i.valueSimpleVar(scope, n, v, result)
91 case *config.TerraformVariable:
92 err = i.valueTerraformVar(scope, n, v, result)
93 case *config.UserVariable:
94 err = i.valueUserVar(scope, n, v, result)
95 default:
96 err = fmt.Errorf("%s: unknown variable type: %T", n, rawV)
97 }
98
99 if err != nil {
100 return nil, err
101 }
102 }
103
104 return result, nil
105}
106
107func (i *Interpolater) valueCountVar(
108 scope *InterpolationScope,
109 n string,
110 v *config.CountVariable,
111 result map[string]ast.Variable) error {
112 switch v.Type {
113 case config.CountValueIndex:
114 if scope.Resource == nil {
115 return fmt.Errorf("%s: count.index is only valid within resources", n)
116 }
117 result[n] = ast.Variable{
118 Value: scope.Resource.CountIndex,
119 Type: ast.TypeInt,
120 }
121 return nil
122 default:
123 return fmt.Errorf("%s: unknown count type: %#v", n, v.Type)
124 }
125}
126
127func unknownVariable() ast.Variable {
128 return ast.Variable{
129 Type: ast.TypeUnknown,
130 Value: config.UnknownVariableValue,
131 }
132}
133
134func unknownValue() string {
135 return hil.UnknownValue
136}
137
138func (i *Interpolater) valueModuleVar(
139 scope *InterpolationScope,
140 n string,
141 v *config.ModuleVariable,
142 result map[string]ast.Variable) error {
143
144 // Build the path to the child module we want
145 path := make([]string, len(scope.Path), len(scope.Path)+1)
146 copy(path, scope.Path)
147 path = append(path, v.Name)
148
149 // Grab the lock so that if other interpolations are running or
150 // state is being modified, we'll be safe.
151 i.StateLock.RLock()
152 defer i.StateLock.RUnlock()
153
154 // Get the module where we're looking for the value
155 mod := i.State.ModuleByPath(path)
156 if mod == nil {
157 // If the module doesn't exist, then we can return an unknown value.
158 // This usually happens only in Refresh() when we haven't populated
159 // a state. During validation, we semantically verify that all
160 // modules reference other modules, and graph ordering should
161 // ensure that the module is in the state, so if we reach this
162 // point otherwise it really is a panic.
163 result[n] = unknownVariable()
164
165 // During apply this is always an error
166 if i.Operation == walkApply {
167 return fmt.Errorf(
168 "Couldn't find module %q for var: %s",
169 v.Name, v.FullKey())
170 }
171 } else {
172 // Get the value from the outputs
173 if outputState, ok := mod.Outputs[v.Field]; ok {
174 output, err := hil.InterfaceToVariable(outputState.Value)
175 if err != nil {
176 return err
177 }
178 result[n] = output
179 } else {
180 // Same reasons as the comment above.
181 result[n] = unknownVariable()
182
183 // During apply this is always an error
184 if i.Operation == walkApply {
185 return fmt.Errorf(
186 "Couldn't find output %q for module var: %s",
187 v.Field, v.FullKey())
188 }
189 }
190 }
191
192 return nil
193}
194
195func (i *Interpolater) valuePathVar(
196 scope *InterpolationScope,
197 n string,
198 v *config.PathVariable,
199 result map[string]ast.Variable) error {
200 switch v.Type {
201 case config.PathValueCwd:
202 wd, err := os.Getwd()
203 if err != nil {
204 return fmt.Errorf(
205 "Couldn't get cwd for var %s: %s",
206 v.FullKey(), err)
207 }
208
209 result[n] = ast.Variable{
210 Value: wd,
211 Type: ast.TypeString,
212 }
213 case config.PathValueModule:
214 if t := i.Module.Child(scope.Path[1:]); t != nil {
215 result[n] = ast.Variable{
216 Value: t.Config().Dir,
217 Type: ast.TypeString,
218 }
219 }
220 case config.PathValueRoot:
221 result[n] = ast.Variable{
222 Value: i.Module.Config().Dir,
223 Type: ast.TypeString,
224 }
225 default:
226 return fmt.Errorf("%s: unknown path type: %#v", n, v.Type)
227 }
228
229 return nil
230
231}
232
233func (i *Interpolater) valueResourceVar(
234 scope *InterpolationScope,
235 n string,
236 v *config.ResourceVariable,
237 result map[string]ast.Variable) error {
238 // During validate we treat all dynamic fields as computed, so
239 // we mark the value as unknown.
240 if i.Operation == walkValidate {
241 result[n] = unknownVariable()
242 return nil
243 }
244
245 var variable *ast.Variable
246 var err error
247
248 if v.Multi && v.Index == -1 {
249 variable, err = i.computeResourceMultiVariable(scope, v)
250 } else {
251 variable, err = i.computeResourceVariable(scope, v)
252 }
253
254 if err != nil {
255 return err
256 }
257
258 if variable == nil {
259 // During the input walk we tolerate missing variables because
260 // we haven't yet had a chance to refresh state, so dynamic data may
261 // not yet be complete.
262 // If it truly is missing, we'll catch it on a later walk.
263 // This applies only to graph nodes that interpolate during the
264 // config walk, e.g. providers.
265 if i.Operation == walkInput || i.Operation == walkRefresh {
266 result[n] = unknownVariable()
267 return nil
268 }
269
270 return fmt.Errorf("variable %q is nil, but no error was reported", v.Name)
271 }
272
273 result[n] = *variable
274 return nil
275}
276
277func (i *Interpolater) valueSelfVar(
278 scope *InterpolationScope,
279 n string,
280 v *config.SelfVariable,
281 result map[string]ast.Variable) error {
282 if scope == nil || scope.Resource == nil {
283 return fmt.Errorf(
284 "%s: invalid scope, self variables are only valid on resources", n)
285 }
286
287 rv, err := config.NewResourceVariable(fmt.Sprintf(
288 "%s.%s.%d.%s",
289 scope.Resource.Type,
290 scope.Resource.Name,
291 scope.Resource.CountIndex,
292 v.Field))
293 if err != nil {
294 return err
295 }
296
297 return i.valueResourceVar(scope, n, rv, result)
298}
299
300func (i *Interpolater) valueSimpleVar(
301 scope *InterpolationScope,
302 n string,
303 v *config.SimpleVariable,
304 result map[string]ast.Variable) error {
305 // This error message includes some information for people who
306 // relied on this for their template_file data sources. We should
307 // remove this at some point but there isn't any rush.
308 return fmt.Errorf(
309 "invalid variable syntax: %q. Did you mean 'var.%s'? If this is part of inline `template` parameter\n"+
310 "then you must escape the interpolation with two dollar signs. For\n"+
311 "example: ${a} becomes $${a}.",
312 n, n)
313}
314
315func (i *Interpolater) valueTerraformVar(
316 scope *InterpolationScope,
317 n string,
318 v *config.TerraformVariable,
319 result map[string]ast.Variable) error {
320 if v.Field != "env" {
321 return fmt.Errorf(
322 "%s: only supported key for 'terraform.X' interpolations is 'env'", n)
323 }
324
325 if i.Meta == nil {
326 return fmt.Errorf(
327 "%s: internal error: nil Meta. Please report a bug.", n)
328 }
329
330 result[n] = ast.Variable{Type: ast.TypeString, Value: i.Meta.Env}
331 return nil
332}
333
334func (i *Interpolater) valueUserVar(
335 scope *InterpolationScope,
336 n string,
337 v *config.UserVariable,
338 result map[string]ast.Variable) error {
339 i.VariableValuesLock.Lock()
340 defer i.VariableValuesLock.Unlock()
341 val, ok := i.VariableValues[v.Name]
342 if ok {
343 varValue, err := hil.InterfaceToVariable(val)
344 if err != nil {
345 return fmt.Errorf("cannot convert %s value %q to an ast.Variable for interpolation: %s",
346 v.Name, val, err)
347 }
348 result[n] = varValue
349 return nil
350 }
351
352 if _, ok := result[n]; !ok && i.Operation == walkValidate {
353 result[n] = unknownVariable()
354 return nil
355 }
356
357 // Look up if we have any variables with this prefix because
358 // those are map overrides. Include those.
359 for k, val := range i.VariableValues {
360 if strings.HasPrefix(k, v.Name+".") {
361 keyComponents := strings.Split(k, ".")
362 overrideKey := keyComponents[len(keyComponents)-1]
363
364 mapInterface, ok := result["var."+v.Name]
365 if !ok {
366 return fmt.Errorf("override for non-existent variable: %s", v.Name)
367 }
368
369 mapVariable := mapInterface.Value.(map[string]ast.Variable)
370
371 varValue, err := hil.InterfaceToVariable(val)
372 if err != nil {
373 return fmt.Errorf("cannot convert %s value %q to an ast.Variable for interpolation: %s",
374 v.Name, val, err)
375 }
376 mapVariable[overrideKey] = varValue
377 }
378 }
379
380 return nil
381}
382
383func (i *Interpolater) computeResourceVariable(
384 scope *InterpolationScope,
385 v *config.ResourceVariable) (*ast.Variable, error) {
386 id := v.ResourceId()
387 if v.Multi {
388 id = fmt.Sprintf("%s.%d", id, v.Index)
389 }
390
391 i.StateLock.RLock()
392 defer i.StateLock.RUnlock()
393
394 unknownVariable := unknownVariable()
395
396 // These variables must be declared early because of the use of GOTO
397 var isList bool
398 var isMap bool
399
400 // Get the information about this resource variable, and verify
401 // that it exists and such.
402 module, cr, err := i.resourceVariableInfo(scope, v)
403 if err != nil {
404 return nil, err
405 }
406
407 // If we're requesting "count", it's a special variable that we grab
408 // directly from the config itself.
409 if v.Field == "count" {
410 var count int
411 if cr != nil {
412 count, err = cr.Count()
413 } else {
414 count, err = i.resourceCountMax(module, cr, v)
415 }
416 if err != nil {
417 return nil, fmt.Errorf(
418 "Error reading %s count: %s",
419 v.ResourceId(),
420 err)
421 }
422
423 return &ast.Variable{Type: ast.TypeInt, Value: count}, nil
424 }
425
426 // Get the resource out from the state. We know the state exists
427 // at this point and if there is a state, we expect there to be a
428 // resource with the given name.
429 var r *ResourceState
430 if module != nil && len(module.Resources) > 0 {
431 var ok bool
432 r, ok = module.Resources[id]
433 if !ok && v.Multi && v.Index == 0 {
434 r, ok = module.Resources[v.ResourceId()]
435 }
436 if !ok {
437 r = nil
438 }
439 }
440 if r == nil || r.Primary == nil {
441 if i.Operation == walkApply || i.Operation == walkPlan {
442 return nil, fmt.Errorf(
443 "Resource '%s' not found for variable '%s'",
444 v.ResourceId(),
445 v.FullKey())
446 }
447
448 // If we have no module in the state yet, or no count, return empty.
449 // NOTE(@mitchellh): I actually don't know why this is here. During
450 // a refactor I kept this here to maintain the same behavior, but
451 // I'm not sure why it's here.
452 if module == nil || len(module.Resources) == 0 {
453 return nil, nil
454 }
455
456 goto MISSING
457 }
458
459 if attr, ok := r.Primary.Attributes[v.Field]; ok {
460 v, err := hil.InterfaceToVariable(attr)
461 return &v, err
462 }
463
464 // computed list or map attribute
465 _, isList = r.Primary.Attributes[v.Field+".#"]
466 _, isMap = r.Primary.Attributes[v.Field+".%"]
467 if isList || isMap {
468 variable, err := i.interpolateComplexTypeAttribute(v.Field, r.Primary.Attributes)
469 return &variable, err
470 }
471
472 // At apply time, we can't do the "maybe has it" check below
473 // that we need for plans since parent elements might be computed.
474 // Therefore, it is an error and we're missing the key.
475 //
476 // TODO: test by creating a state and configuration that is referencing
477 // a non-existent variable "foo.bar" where the state only has "foo"
478 // and verify plan works, but apply doesn't.
479 if i.Operation == walkApply || i.Operation == walkDestroy {
480 goto MISSING
481 }
482
483 // We didn't find the exact field, so let's separate the dots
484 // and see if anything along the way is a computed set. i.e. if
485 // we have "foo.0.bar" as the field, check to see if "foo" is
486 // a computed list. If so, then the whole thing is computed.
487 if parts := strings.Split(v.Field, "."); len(parts) > 1 {
488 for i := 1; i < len(parts); i++ {
489 // Lists and sets make this
490 key := fmt.Sprintf("%s.#", strings.Join(parts[:i], "."))
491 if attr, ok := r.Primary.Attributes[key]; ok {
492 v, err := hil.InterfaceToVariable(attr)
493 return &v, err
494 }
495
496 // Maps make this
497 key = strings.Join(parts[:i], ".")
498 if attr, ok := r.Primary.Attributes[key]; ok {
499 v, err := hil.InterfaceToVariable(attr)
500 return &v, err
501 }
502 }
503 }
504
505MISSING:
506 // Validation for missing interpolations should happen at a higher
507 // semantic level. If we reached this point and don't have variables,
508 // just return the computed value.
509 if scope == nil || scope.Resource == nil {
510 return &unknownVariable, nil
511 }
512
513 // If the operation is refresh, it isn't an error for a value to
514 // be unknown. Instead, we return that the value is computed so
515 // that the graph can continue to refresh other nodes. It doesn't
516 // matter because the config isn't interpolated anyways.
517 //
518 // For a Destroy, we're also fine with computed values, since our goal is
519 // only to get destroy nodes for existing resources.
520 //
521 // For an input walk, computed values are okay to return because we're only
522 // looking for missing variables to prompt the user for.
523 if i.Operation == walkRefresh || i.Operation == walkPlanDestroy || i.Operation == walkInput {
524 return &unknownVariable, nil
525 }
526
527 return nil, fmt.Errorf(
528 "Resource '%s' does not have attribute '%s' "+
529 "for variable '%s'",
530 id,
531 v.Field,
532 v.FullKey())
533}
534
535func (i *Interpolater) computeResourceMultiVariable(
536 scope *InterpolationScope,
537 v *config.ResourceVariable) (*ast.Variable, error) {
538 i.StateLock.RLock()
539 defer i.StateLock.RUnlock()
540
541 unknownVariable := unknownVariable()
542
543 // If we're only looking for input, we don't need to expand a
544 // multi-variable. This prevents us from encountering things that should be
545 // known but aren't because the state has yet to be refreshed.
546 if i.Operation == walkInput {
547 return &unknownVariable, nil
548 }
549
550 // Get the information about this resource variable, and verify
551 // that it exists and such.
552 module, cr, err := i.resourceVariableInfo(scope, v)
553 if err != nil {
554 return nil, err
555 }
556
557 // Get the keys for all the resources that are created for this resource
558 countMax, err := i.resourceCountMax(module, cr, v)
559 if err != nil {
560 return nil, err
561 }
562
563 // If count is zero, we return an empty list
564 if countMax == 0 {
565 return &ast.Variable{Type: ast.TypeList, Value: []ast.Variable{}}, nil
566 }
567
568 // If we have no module in the state yet or count, return unknown
569 if module == nil || len(module.Resources) == 0 {
570 return &unknownVariable, nil
571 }
572
573 var values []interface{}
574 for idx := 0; idx < countMax; idx++ {
575 id := fmt.Sprintf("%s.%d", v.ResourceId(), idx)
576
577 // ID doesn't have a trailing index. We try both here, but if a value
578 // without a trailing index is found we prefer that. This choice
579 // is for legacy reasons: older versions of TF preferred it.
580 if id == v.ResourceId()+".0" {
581 potential := v.ResourceId()
582 if _, ok := module.Resources[potential]; ok {
583 id = potential
584 }
585 }
586
587 r, ok := module.Resources[id]
588 if !ok {
589 continue
590 }
591
592 if r.Primary == nil {
593 continue
594 }
595
596 if singleAttr, ok := r.Primary.Attributes[v.Field]; ok {
597 values = append(values, singleAttr)
598 continue
599 }
600
601 // computed list or map attribute
602 _, isList := r.Primary.Attributes[v.Field+".#"]
603 _, isMap := r.Primary.Attributes[v.Field+".%"]
604 if !(isList || isMap) {
605 continue
606 }
607 multiAttr, err := i.interpolateComplexTypeAttribute(v.Field, r.Primary.Attributes)
608 if err != nil {
609 return nil, err
610 }
611
612 values = append(values, multiAttr)
613 }
614
615 if len(values) == 0 {
616 // If the operation is refresh, it isn't an error for a value to
617 // be unknown. Instead, we return that the value is computed so
618 // that the graph can continue to refresh other nodes. It doesn't
619 // matter because the config isn't interpolated anyways.
620 //
621 // For a Destroy, we're also fine with computed values, since our goal is
622 // only to get destroy nodes for existing resources.
623 //
624 // For an input walk, computed values are okay to return because we're only
625 // looking for missing variables to prompt the user for.
626 if i.Operation == walkRefresh || i.Operation == walkPlanDestroy || i.Operation == walkDestroy || i.Operation == walkInput {
627 return &unknownVariable, nil
628 }
629
630 return nil, fmt.Errorf(
631 "Resource '%s' does not have attribute '%s' "+
632 "for variable '%s'",
633 v.ResourceId(),
634 v.Field,
635 v.FullKey())
636 }
637
638 variable, err := hil.InterfaceToVariable(values)
639 return &variable, err
640}
641
642func (i *Interpolater) interpolateComplexTypeAttribute(
643 resourceID string,
644 attributes map[string]string) (ast.Variable, error) {
645
646 // We can now distinguish between lists and maps in state by the count field:
647 // - lists (and by extension, sets) use the traditional .# notation
648 // - maps use the newer .% notation
649 // Consequently here we can decide how to deal with the keys appropriately
650 // based on whether the type is a map or a list.
651 if lengthAttr, isList := attributes[resourceID+".#"]; isList {
652 log.Printf("[DEBUG] Interpolating computed list element attribute %s (%s)",
653 resourceID, lengthAttr)
654
655 // In Terraform's internal dotted representation of list-like attributes, the
656 // ".#" count field is marked as unknown to indicate "this whole list is
657 // unknown". We must honor that meaning here so computed references can be
658 // treated properly during the plan phase.
659 if lengthAttr == config.UnknownVariableValue {
660 return unknownVariable(), nil
661 }
662
663 expanded := flatmap.Expand(attributes, resourceID)
664 return hil.InterfaceToVariable(expanded)
665 }
666
667 if lengthAttr, isMap := attributes[resourceID+".%"]; isMap {
668 log.Printf("[DEBUG] Interpolating computed map element attribute %s (%s)",
669 resourceID, lengthAttr)
670
671 // In Terraform's internal dotted representation of map attributes, the
672 // ".%" count field is marked as unknown to indicate "this whole list is
673 // unknown". We must honor that meaning here so computed references can be
674 // treated properly during the plan phase.
675 if lengthAttr == config.UnknownVariableValue {
676 return unknownVariable(), nil
677 }
678
679 expanded := flatmap.Expand(attributes, resourceID)
680 return hil.InterfaceToVariable(expanded)
681 }
682
683 return ast.Variable{}, fmt.Errorf("No complex type %s found", resourceID)
684}
685
686func (i *Interpolater) resourceVariableInfo(
687 scope *InterpolationScope,
688 v *config.ResourceVariable) (*ModuleState, *config.Resource, error) {
689 // Get the module tree that contains our current path. This is
690 // either the current module (path is empty) or a child.
691 modTree := i.Module
692 if len(scope.Path) > 1 {
693 modTree = i.Module.Child(scope.Path[1:])
694 }
695
696 // Get the resource from the configuration so we can verify
697 // that the resource is in the configuration and so we can access
698 // the configuration if we need to.
699 var cr *config.Resource
700 for _, r := range modTree.Config().Resources {
701 if r.Id() == v.ResourceId() {
702 cr = r
703 break
704 }
705 }
706
707 // Get the relevant module
708 module := i.State.ModuleByPath(scope.Path)
709 return module, cr, nil
710}
711
712func (i *Interpolater) resourceCountMax(
713 ms *ModuleState,
714 cr *config.Resource,
715 v *config.ResourceVariable) (int, error) {
716 id := v.ResourceId()
717
718 // If we're NOT applying, then we assume we can read the count
719 // from the config. Plan and so on may not have any state yet, so
720 // we do a full interpolation of the configured count.
721 if i.Operation != walkApply {
722 if cr == nil {
723 return 0, nil
724 }
725
726 count, err := cr.Count()
727 if err != nil {
728 return 0, err
729 }
730
731 return count, nil
732 }
733
734 // We need to determine the list of resource keys to get values from.
735 // This needs to be sorted so the order is deterministic. We used to
736 // use "cr.Count()" but that doesn't work if the count is interpolated
737 // and we can't guarantee that so we instead depend on the state.
738 max := -1
739 for k := range ms.Resources {
740 // Get the index number for this resource
741 index := ""
742 if k == id {
743 // If the key is the id, then it's just 0 (no explicit index)
744 index = "0"
745 } else if strings.HasPrefix(k, id+".") {
746 // Grab the index number out of the state
747 index = k[len(id+"."):]
748 if idx := strings.IndexRune(index, '.'); idx >= 0 {
749 index = index[:idx]
750 }
751 }
752
753 // If there was no index then this resource didn't match
754 // the one we're looking for; skip it.
755 if index == "" {
756 continue
757 }
758
759 // Turn the index into an int
760 raw, err := strconv.ParseInt(index, 0, 0)
761 if err != nil {
762 return 0, fmt.Errorf(
763 "%s: error parsing index %q as int: %s",
764 id, index, err)
765 }
766
767 // Keep track of this index if it's the max
768 if new := int(raw); new > max {
769 max = new
770 }
771 }
772
773 // If we never found any matching resources in the state, we
774 // have zero.
775 if max == -1 {
776 return 0, nil
777 }
778
779 // The result value is "max+1" because we're returning the
780 // max COUNT, not the max INDEX, and we zero-index.
781 return max + 1, nil
782}
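
The index scan in resourceCountMax is easier to see in isolation: derive the effective count from state keys, treating the bare id as index 0 and returning max index + 1. A standalone sketch of that logic:

package main

import (
    "fmt"
    "strconv"
    "strings"
)

// countFromStateKeys mirrors resourceCountMax's apply-time branch.
func countFromStateKeys(keys []string, id string) (int, error) {
    max := -1
    for _, k := range keys {
        index := ""
        if k == id {
            index = "0" // bare id means a single, zero-indexed instance
        } else if strings.HasPrefix(k, id+".") {
            index = strings.TrimPrefix(k, id+".")
            if dot := strings.IndexRune(index, '.'); dot >= 0 {
                index = index[:dot]
            }
        }
        if index == "" {
            continue // not an instance of this resource
        }
        n, err := strconv.Atoi(index)
        if err != nil {
            return 0, fmt.Errorf("%s: bad index %q: %s", id, index, err)
        }
        if n > max {
            max = n
        }
    }
    return max + 1, nil // max INDEX + 1 == COUNT; 0 when nothing matched
}

func main() {
    keys := []string{"aws_instance.web", "aws_instance.web.1", "aws_instance.web.2"}
    fmt.Println(countFromStateKeys(keys, "aws_instance.web")) // 3 <nil>
}
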
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_count_boundary.go b/vendor/github.com/hashicorp/terraform/terraform/node_count_boundary.go
new file mode 100644
index 0000000..bd32c79
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_count_boundary.go
@@ -0,0 +1,14 @@
1package terraform
2
3// NodeCountBoundary fixes any "count boundaries" in the state: resources
4// that are named "foo.0" when they should be named "foo".
5type NodeCountBoundary struct{}
6
7func (n *NodeCountBoundary) Name() string {
8 return "meta.count-boundary (count boundary fixup)"
9}
10
11// GraphNodeEvalable
12func (n *NodeCountBoundary) EvalTree() EvalNode {
13 return &EvalCountFixZeroOneBoundaryGlobal{}
14}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_data_destroy.go b/vendor/github.com/hashicorp/terraform/terraform/node_data_destroy.go
new file mode 100644
index 0000000..e32cea8
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_data_destroy.go
@@ -0,0 +1,22 @@
1package terraform
2
3// NodeDestroyableDataResource represents a data resource that can be
4// destroyed: its eval tree simply removes the resource from the state.
5type NodeDestroyableDataResource struct {
6 *NodeAbstractResource
7}
8
9// GraphNodeEvalable
10func (n *NodeDestroyableDataResource) EvalTree() EvalNode {
11 addr := n.NodeAbstractResource.Addr
12
13 // stateId is the ID to put into the state
14 stateId := addr.stateId()
15
16 // Just destroy it.
17 var state *InstanceState
18 return &EvalWriteState{
19 Name: stateId,
20 State: &state, // state is nil here
21 }
22}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_data_refresh.go b/vendor/github.com/hashicorp/terraform/terraform/node_data_refresh.go
new file mode 100644
index 0000000..d504c89
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_data_refresh.go
@@ -0,0 +1,198 @@
1package terraform
2
3import (
4 "github.com/hashicorp/terraform/dag"
5)
6
7// NodeRefreshableDataResource represents a data resource that is
8// refreshable: it expands its count and refreshes each instance.
9type NodeRefreshableDataResource struct {
10 *NodeAbstractCountResource
11}
12
13// GraphNodeDynamicExpandable
14func (n *NodeRefreshableDataResource) DynamicExpand(ctx EvalContext) (*Graph, error) {
15 // Grab the state which we read
16 state, lock := ctx.State()
17 lock.RLock()
18 defer lock.RUnlock()
19
20 // Expand the resource count which must be available by now from EvalTree
21 count, err := n.Config.Count()
22 if err != nil {
23 return nil, err
24 }
25
26 // The concrete resource factory we'll use
27 concreteResource := func(a *NodeAbstractResource) dag.Vertex {
28 // Add the config since we don't do that via transforms
29 a.Config = n.Config
30
31 return &NodeRefreshableDataResourceInstance{
32 NodeAbstractResource: a,
33 }
34 }
35
36 // Start creating the steps
37 steps := []GraphTransformer{
38 // Expand the count.
39 &ResourceCountTransformer{
40 Concrete: concreteResource,
41 Count: count,
42 Addr: n.ResourceAddr(),
43 },
44
45 // Attach the state
46 &AttachStateTransformer{State: state},
47
48 // Targeting
49 &TargetsTransformer{ParsedTargets: n.Targets},
50
51 // Connect references so ordering is correct
52 &ReferenceTransformer{},
53
54 // Make sure there is a single root
55 &RootTransformer{},
56 }
57
58 // Build the graph
59 b := &BasicGraphBuilder{
60 Steps: steps,
61 Validate: true,
62 Name: "NodeRefreshableDataResource",
63 }
64
65 return b.Build(ctx.Path())
66}
67
68// NodeRefreshableDataResourceInstance represents a _single_ resource instance
69// that is refreshable.
70type NodeRefreshableDataResourceInstance struct {
71 *NodeAbstractResource
72}
73
74// GraphNodeEvalable
75func (n *NodeRefreshableDataResourceInstance) EvalTree() EvalNode {
76 addr := n.NodeAbstractResource.Addr
77
78 // stateId is the ID to put into the state
79 stateId := addr.stateId()
80
81 // Build the instance info. More of this will be populated during eval
82 info := &InstanceInfo{
83 Id: stateId,
84 Type: addr.Type,
85 }
86
87 // Get the state if we have it; if not, we build it
88 rs := n.ResourceState
89 if rs == nil {
90 rs = &ResourceState{}
91 }
92
93 // If the config isn't empty, rebuild the state from it
94 if n.Config != nil {
95 rs = &ResourceState{
96 Type: n.Config.Type,
97 Provider: n.Config.Provider,
98 Dependencies: n.StateReferences(),
99 }
100 }
101
102 // Build the resource for eval
103 resource := &Resource{
104 Name: addr.Name,
105 Type: addr.Type,
106 CountIndex: addr.Index,
107 }
108 if resource.CountIndex < 0 {
109 resource.CountIndex = 0
110 }
111
112 // Declare a bunch of variables that are used for state during
113 // evaluation. Most of these are written to by address below.
114 var config *ResourceConfig
115 var diff *InstanceDiff
116 var provider ResourceProvider
117 var state *InstanceState
118
119 return &EvalSequence{
120 Nodes: []EvalNode{
121 // Always destroy the existing state first, since we must
122 // make sure that values from a previous read will not
123 // get interpolated if we end up needing to defer our
124 // loading until apply time.
125 &EvalWriteState{
126 Name: stateId,
127 ResourceType: rs.Type,
128 Provider: rs.Provider,
129 Dependencies: rs.Dependencies,
130 State: &state, // state is nil here
131 },
132
133 &EvalInterpolate{
134 Config: n.Config.RawConfig.Copy(),
135 Resource: resource,
136 Output: &config,
137 },
138
139 // The rest of this pass can proceed only if there are no
140 // computed values in our config.
141 // (If there are, we'll deal with this during the plan and
142 // apply phases.)
143 &EvalIf{
144 If: func(ctx EvalContext) (bool, error) {
145 if len(config.ComputedKeys) > 0 {
146 return true, EvalEarlyExitError{}
147 }
148
149 // If the config explicitly has a depends_on for this
150 // data source, assume the intention is to prevent
151 // refreshing ahead of that dependency.
152 if len(n.Config.DependsOn) > 0 {
153 return true, EvalEarlyExitError{}
154 }
155
156 return true, nil
157 },
158
159 Then: EvalNoop{},
160 },
161
162 // The remainder of this pass is the same as running
163 // a "plan" pass immediately followed by an "apply" pass,
164 // populating the state early so it'll be available to
165 // provider configurations that need this data during
166 // refresh/plan.
167 &EvalGetProvider{
168 Name: n.ProvidedBy()[0],
169 Output: &provider,
170 },
171
172 &EvalReadDataDiff{
173 Info: info,
174 Config: &config,
175 Provider: &provider,
176 Output: &diff,
177 OutputState: &state,
178 },
179
180 &EvalReadDataApply{
181 Info: info,
182 Diff: &diff,
183 Provider: &provider,
184 Output: &state,
185 },
186
187 &EvalWriteState{
188 Name: stateId,
189 ResourceType: rs.Type,
190 Provider: rs.Provider,
191 Dependencies: rs.Dependencies,
192 State: &state,
193 },
194
195 &EvalUpdateStateHook{},
196 },
197 }
198}
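The EvalIf above defers a data source's read to apply time when the interpolated config still contains computed keys or when depends_on is set. A simplified standalone sketch of just that predicate (the parameter names are illustrative):

package main

import "fmt"

// deferRead reports whether the data source read must wait until apply:
// either the config still contains computed values, or an explicit
// depends_on forces ordering behind another resource.
func deferRead(computedKeys, dependsOn []string) bool {
	return len(computedKeys) > 0 || len(dependsOn) > 0
}

func main() {
	fmt.Println(deferRead([]string{"vpc_id"}, nil))           // true: computed input
	fmt.Println(deferRead(nil, []string{"aws_instance.db"})) // true: depends_on set
	fmt.Println(deferRead(nil, nil))                          // false: read during refresh
}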
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_module_destroy.go b/vendor/github.com/hashicorp/terraform/terraform/node_module_destroy.go
new file mode 100644
index 0000000..319df1e
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_module_destroy.go
@@ -0,0 +1,29 @@
1package terraform
2
3import (
4 "fmt"
5)
6
7// NodeDestroyableModuleVariable represents a module variable during destroy.
8type NodeDestroyableModuleVariable struct {
9 PathValue []string
10}
11
12func (n *NodeDestroyableModuleVariable) Name() string {
13 result := "plan-destroy"
14 if len(n.PathValue) > 1 {
15 result = fmt.Sprintf("%s.%s", modulePrefixStr(n.PathValue), result)
16 }
17
18 return result
19}
20
21// GraphNodeSubPath
22func (n *NodeDestroyableModuleVariable) Path() []string {
23 return n.PathValue
24}
25
26// GraphNodeEvalable
27func (n *NodeDestroyableModuleVariable) EvalTree() EvalNode {
28 return &EvalDiffDestroyModule{Path: n.PathValue}
29}
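Name follows the package-wide convention of prefixing nodes in child modules. A rough sketch under the assumption that modulePrefixStr (a helper not shown in this diff) renders every path element after the root as a dotted "module.<name>" pair:

package main

import (
	"fmt"
	"strings"
)

// modulePrefixStr here encodes an assumption about the unexported helper:
// each path element after the root becomes a "module.<name>" segment.
func modulePrefixStr(path []string) string {
	var parts []string
	for _, p := range path[1:] {
		parts = append(parts, "module", p)
	}
	return strings.Join(parts, ".")
}

func main() {
	result := "plan-destroy"
	path := []string{"root", "network"}
	if len(path) > 1 {
		result = fmt.Sprintf("%s.%s", modulePrefixStr(path), result)
	}
	fmt.Println(result) // module.network.plan-destroy
}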
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_module_variable.go b/vendor/github.com/hashicorp/terraform/terraform/node_module_variable.go
new file mode 100644
index 0000000..13fe8fc
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_module_variable.go
@@ -0,0 +1,125 @@
1package terraform
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/terraform/config"
7 "github.com/hashicorp/terraform/config/module"
8)
9
10// NodeApplyableModuleVariable represents a module variable input during
11// the apply step.
12type NodeApplyableModuleVariable struct {
13 PathValue []string
14 Config *config.Variable // Config is the var in the config
15 Value *config.RawConfig // Value is the value that is set
16
17 Module *module.Tree // Antiquated, want to remove
18}
19
20func (n *NodeApplyableModuleVariable) Name() string {
21 result := fmt.Sprintf("var.%s", n.Config.Name)
22 if len(n.PathValue) > 1 {
23 result = fmt.Sprintf("%s.%s", modulePrefixStr(n.PathValue), result)
24 }
25
26 return result
27}
28
29// GraphNodeSubPath
30func (n *NodeApplyableModuleVariable) Path() []string {
31 // We execute in the parent scope (above our own module) so that
32 // we can access the proper interpolations.
33 if len(n.PathValue) > 2 {
34 return n.PathValue[:len(n.PathValue)-1]
35 }
36
37 return rootModulePath
38}
39
40// RemovableIfNotTargeted
41func (n *NodeApplyableModuleVariable) RemoveIfNotTargeted() bool {
42 // We need to add this so that this node will be removed if
43 // it isn't targeted or a dependency of a target.
44 return true
45}
46
47// GraphNodeReferenceGlobal
48func (n *NodeApplyableModuleVariable) ReferenceGlobal() bool {
49 // We have to create fully qualified references because we cross
50 // boundaries here: our ReferenceableName is in one path and our
51 // References are from another path.
52 return true
53}
54
55// GraphNodeReferenceable
56func (n *NodeApplyableModuleVariable) ReferenceableName() []string {
57 return []string{n.Name()}
58}
59
60// GraphNodeReferencer
61func (n *NodeApplyableModuleVariable) References() []string {
62 // If we have no value set, we depend on nothing
63 if n.Value == nil {
64 return nil
65 }
66
67 // Can't depend on anything if we're in the root
68 if len(n.PathValue) < 2 {
69 return nil
70 }
71
72 // Otherwise, we depend on anything that is in our value, but
73 // specifically in the namespace of the parent path.
74 // Create the prefix based on the path
75 var prefix string
76 if p := n.Path(); len(p) > 0 {
77 prefix = modulePrefixStr(p)
78 }
79
80 result := ReferencesFromConfig(n.Value)
81 return modulePrefixList(result, prefix)
82}
83
84// GraphNodeEvalable
85func (n *NodeApplyableModuleVariable) EvalTree() EvalNode {
86 // If we have no value, do nothing
87 if n.Value == nil {
88 return &EvalNoop{}
89 }
90
91 // Otherwise, interpolate the value of this variable and set it
92 // within the variables mapping.
93 var config *ResourceConfig
94 variables := make(map[string]interface{})
95 return &EvalSequence{
96 Nodes: []EvalNode{
97 &EvalInterpolate{
98 Config: n.Value,
99 Output: &config,
100 },
101
102 &EvalVariableBlock{
103 Config: &config,
104 VariableValues: variables,
105 },
106
107 &EvalCoerceMapVariable{
108 Variables: variables,
109 ModulePath: n.PathValue,
110 ModuleTree: n.Module,
111 },
112
113 &EvalTypeCheckVariable{
114 Variables: variables,
115 ModulePath: n.PathValue,
116 ModuleTree: n.Module,
117 },
118
119 &EvalSetVariables{
120 Module: &n.PathValue[len(n.PathValue)-1],
121 Variables: variables,
122 },
123 },
124 }
125}
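Path above is the subtle part of this node: a module variable evaluates in its parent's scope so its value expression can see the parent's interpolations. A small standalone sketch of that slicing (rootModulePath is assumed to be a single-element root path):

package main

import "fmt"

var rootModulePath = []string{"root"} // assumed representation of the root path

// parentScope mirrors NodeApplyableModuleVariable.Path: drop the last
// path element so evaluation happens one module above our own.
func parentScope(pathValue []string) []string {
	if len(pathValue) > 2 {
		return pathValue[:len(pathValue)-1]
	}
	return rootModulePath
}

func main() {
	fmt.Println(parentScope([]string{"root", "network", "subnets"})) // [root network]
	fmt.Println(parentScope([]string{"root", "network"}))            // [root]
}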
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_output.go b/vendor/github.com/hashicorp/terraform/terraform/node_output.go
new file mode 100644
index 0000000..e28e6f0
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_output.go
@@ -0,0 +1,76 @@
1package terraform
2
3import (
4 "fmt"
5 "strings"
6
7 "github.com/hashicorp/terraform/config"
8)
9
10// NodeApplyableOutput represents an output that is "applyable":
11// it is ready to be applied.
12type NodeApplyableOutput struct {
13 PathValue []string
14 Config *config.Output // Config is the output in the config
15}
16
17func (n *NodeApplyableOutput) Name() string {
18 result := fmt.Sprintf("output.%s", n.Config.Name)
19 if len(n.PathValue) > 1 {
20 result = fmt.Sprintf("%s.%s", modulePrefixStr(n.PathValue), result)
21 }
22
23 return result
24}
25
26// GraphNodeSubPath
27func (n *NodeApplyableOutput) Path() []string {
28 return n.PathValue
29}
30
31// RemovableIfNotTargeted
32func (n *NodeApplyableOutput) RemoveIfNotTargeted() bool {
33 // We need to add this so that this node will be removed if
34 // it isn't targeted or a dependency of a target.
35 return true
36}
37
38// GraphNodeReferenceable
39func (n *NodeApplyableOutput) ReferenceableName() []string {
40 name := fmt.Sprintf("output.%s", n.Config.Name)
41 return []string{name}
42}
43
44// GraphNodeReferencer
45func (n *NodeApplyableOutput) References() []string {
46 var result []string
47 result = append(result, n.Config.DependsOn...)
48 result = append(result, ReferencesFromConfig(n.Config.RawConfig)...)
49 for _, v := range result {
50 split := strings.Split(v, "/")
51 for i, s := range split {
52 split[i] = s + ".destroy"
53 }
54
55 result = append(result, strings.Join(split, "/"))
56 }
57
58 return result
59}
60
61// GraphNodeEvalable
62func (n *NodeApplyableOutput) EvalTree() EvalNode {
63 return &EvalOpFilter{
64 Ops: []walkOperation{walkRefresh, walkPlan, walkApply,
65 walkDestroy, walkInput, walkValidate},
66 Node: &EvalSequence{
67 Nodes: []EvalNode{
68 &EvalWriteOutput{
69 Name: n.Config.Name,
70 Sensitive: n.Config.Sensitive,
71 Value: n.Config.RawConfig,
72 },
73 },
74 },
75 }
76}
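References above doubles each reference with a ".destroy" variant so the output is also ordered after the destruction of whatever it reads. A standalone sketch of that expansion, mirroring the loop in the vendored code:

package main

import (
	"fmt"
	"strings"
)

// withDestroyVariants mirrors NodeApplyableOutput.References: each
// reference gains a copy whose "/"-separated parts get a ".destroy" suffix.
func withDestroyVariants(refs []string) []string {
	result := append([]string(nil), refs...)
	for _, v := range refs {
		split := strings.Split(v, "/")
		for i, s := range split {
			split[i] = s + ".destroy"
		}
		result = append(result, strings.Join(split, "/"))
	}
	return result
}

func main() {
	fmt.Println(withDestroyVariants([]string{"aws_instance.web"}))
	// [aws_instance.web aws_instance.web.destroy]
}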
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_output_orphan.go b/vendor/github.com/hashicorp/terraform/terraform/node_output_orphan.go
new file mode 100644
index 0000000..636a15d
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_output_orphan.go
@@ -0,0 +1,35 @@
1package terraform
2
3import (
4 "fmt"
5)
6
7// NodeOutputOrphan represents an output that is an orphan.
8type NodeOutputOrphan struct {
9 OutputName string
10 PathValue []string
11}
12
13func (n *NodeOutputOrphan) Name() string {
14 result := fmt.Sprintf("output.%s (orphan)", n.OutputName)
15 if len(n.PathValue) > 1 {
16 result = fmt.Sprintf("%s.%s", modulePrefixStr(n.PathValue), result)
17 }
18
19 return result
20}
21
22// GraphNodeSubPath
23func (n *NodeOutputOrphan) Path() []string {
24 return n.PathValue
25}
26
27// GraphNodeEvalable
28func (n *NodeOutputOrphan) EvalTree() EvalNode {
29 return &EvalOpFilter{
30 Ops: []walkOperation{walkRefresh, walkApply, walkDestroy},
31 Node: &EvalDeleteOutput{
32 Name: n.OutputName,
33 },
34 }
35}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_provider.go b/vendor/github.com/hashicorp/terraform/terraform/node_provider.go
new file mode 100644
index 0000000..8e2c176
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_provider.go
@@ -0,0 +1,11 @@
1package terraform
2
3// NodeApplyableProvider represents a provider during an apply.
4type NodeApplyableProvider struct {
5 *NodeAbstractProvider
6}
7
8// GraphNodeEvalable
9func (n *NodeApplyableProvider) EvalTree() EvalNode {
10 return ProviderEvalTree(n.NameValue, n.ProviderConfig())
11}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_provider_abstract.go b/vendor/github.com/hashicorp/terraform/terraform/node_provider_abstract.go
new file mode 100644
index 0000000..6cc8365
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_provider_abstract.go
@@ -0,0 +1,85 @@
1package terraform
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/terraform/config"
7 "github.com/hashicorp/terraform/dag"
8)
9
10// ConcreteProviderNodeFunc is a callback type used to convert an
11// abstract provider to a concrete one of some type.
12type ConcreteProviderNodeFunc func(*NodeAbstractProvider) dag.Vertex
13
14// NodeAbstractProvider represents a provider that has no associated operations.
15// It registers all the common interfaces across operations for providers.
16type NodeAbstractProvider struct {
17 NameValue string
18 PathValue []string
19
20 // The fields below will be automatically set using the Attach
21 // interfaces if you're running those transforms, but may also be explicitly
22 // set if you already have that information.
23
24 Config *config.ProviderConfig
25}
26
27func (n *NodeAbstractProvider) Name() string {
28 result := fmt.Sprintf("provider.%s", n.NameValue)
29 if len(n.PathValue) > 1 {
30 result = fmt.Sprintf("%s.%s", modulePrefixStr(n.PathValue), result)
31 }
32
33 return result
34}
35
36// GraphNodeSubPath
37func (n *NodeAbstractProvider) Path() []string {
38 return n.PathValue
39}
40
41// RemovableIfNotTargeted
42func (n *NodeAbstractProvider) RemoveIfNotTargeted() bool {
43 // We need to add this so that this node will be removed if
44 // it isn't targeted or a dependency of a target.
45 return true
46}
47
48// GraphNodeReferencer
49func (n *NodeAbstractProvider) References() []string {
50 if n.Config == nil {
51 return nil
52 }
53
54 return ReferencesFromConfig(n.Config.RawConfig)
55}
56
57// GraphNodeProvider
58func (n *NodeAbstractProvider) ProviderName() string {
59 return n.NameValue
60}
61
62// GraphNodeProvider
63func (n *NodeAbstractProvider) ProviderConfig() *config.RawConfig {
64 if n.Config == nil {
65 return nil
66 }
67
68 return n.Config.RawConfig
69}
70
71// GraphNodeAttachProvider
72func (n *NodeAbstractProvider) AttachProvider(c *config.ProviderConfig) {
73 n.Config = c
74}
75
76// GraphNodeDotter impl.
77func (n *NodeAbstractProvider) DotNode(name string, opts *dag.DotOpts) *dag.DotNode {
78 return &dag.DotNode{
79 Name: name,
80 Attrs: map[string]string{
81 "label": n.Name(),
82 "shape": "diamond",
83 },
84 }
85}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_provider_disabled.go b/vendor/github.com/hashicorp/terraform/terraform/node_provider_disabled.go
new file mode 100644
index 0000000..25e7e62
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_provider_disabled.go
@@ -0,0 +1,38 @@
1package terraform
2
3import (
4 "fmt"
5)
6
7// NodeDisabledProvider represents a provider that is disabled. A disabled
8// provider does nothing. It exists to properly set inheritance information
9// for child providers.
10type NodeDisabledProvider struct {
11 *NodeAbstractProvider
12}
13
14func (n *NodeDisabledProvider) Name() string {
15 return fmt.Sprintf("%s (disabled)", n.NodeAbstractProvider.Name())
16}
17
18// GraphNodeEvalable
19func (n *NodeDisabledProvider) EvalTree() EvalNode {
20 var resourceConfig *ResourceConfig
21 return &EvalSequence{
22 Nodes: []EvalNode{
23 &EvalInterpolate{
24 Config: n.ProviderConfig(),
25 Output: &resourceConfig,
26 },
27 &EvalBuildProviderConfig{
28 Provider: n.ProviderName(),
29 Config: &resourceConfig,
30 Output: &resourceConfig,
31 },
32 &EvalSetProviderConfig{
33 Provider: n.ProviderName(),
34 Config: &resourceConfig,
35 },
36 },
37 }
38}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_provisioner.go b/vendor/github.com/hashicorp/terraform/terraform/node_provisioner.go
new file mode 100644
index 0000000..bb117c1
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_provisioner.go
@@ -0,0 +1,44 @@
1package terraform
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/terraform/config"
7)
8
9// NodeProvisioner represents a provisioner that has no associated operations.
10// It registers all the common interfaces across operations for provisioners.
11type NodeProvisioner struct {
12 NameValue string
13 PathValue []string
14
15 // The fields below will be automatically set using the Attach
16 // interfaces if you're running those transforms, but may also be explicitly
17 // set if you already have that information.
18
19 Config *config.ProviderConfig
20}
21
22func (n *NodeProvisioner) Name() string {
23 result := fmt.Sprintf("provisioner.%s", n.NameValue)
24 if len(n.PathValue) > 1 {
25 result = fmt.Sprintf("%s.%s", modulePrefixStr(n.PathValue), result)
26 }
27
28 return result
29}
30
31// GraphNodeSubPath
32func (n *NodeProvisioner) Path() []string {
33 return n.PathValue
34}
35
36// GraphNodeProvisioner
37func (n *NodeProvisioner) ProvisionerName() string {
38 return n.NameValue
39}
40
41// GraphNodeEvalable impl.
42func (n *NodeProvisioner) EvalTree() EvalNode {
43 return &EvalInitProvisioner{Name: n.NameValue}
44}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract.go
new file mode 100644
index 0000000..50bb707
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract.go
@@ -0,0 +1,240 @@
1package terraform
2
3import (
4 "fmt"
5 "strings"
6
7 "github.com/hashicorp/terraform/config"
8 "github.com/hashicorp/terraform/dag"
9)
10
11// ConcreteResourceNodeFunc is a callback type used to convert an
12// abstract resource to a concrete one of some type.
13type ConcreteResourceNodeFunc func(*NodeAbstractResource) dag.Vertex
14
15// GraphNodeResource is implemented by any nodes that represent a resource.
16// The type of operation cannot be assumed, only that this node represents
17// the given resource.
18type GraphNodeResource interface {
19 ResourceAddr() *ResourceAddress
20}
21
22// NodeAbstractResource represents a resource that has no associated
23// operations. It registers all the interfaces for a resource that are
24// common across multiple operation types.
25type NodeAbstractResource struct {
26 Addr *ResourceAddress // Addr is the address for this resource
27
28 // The fields below will be automatically set using the Attach
29 // interfaces if you're running those transforms, but may also be explicitly
30 // set if you already have that information.
31
32 Config *config.Resource // Config is the resource in the config
33 ResourceState *ResourceState // ResourceState is the ResourceState for this
34
35 Targets []ResourceAddress // Set from GraphNodeTargetable
36}
37
38func (n *NodeAbstractResource) Name() string {
39 return n.Addr.String()
40}
41
42// GraphNodeSubPath
43func (n *NodeAbstractResource) Path() []string {
44 return n.Addr.Path
45}
46
47// GraphNodeReferenceable
48func (n *NodeAbstractResource) ReferenceableName() []string {
49 // We are always referenceable as "type.name" as long as
50 // we have a config or address. Determine what that value is.
51 var id string
52 if n.Config != nil {
53 id = n.Config.Id()
54 } else if n.Addr != nil {
55 addrCopy := n.Addr.Copy()
56 addrCopy.Path = nil // ReferenceTransformer handles paths
57 addrCopy.Index = -1 // We handle indexes below
58 id = addrCopy.String()
59 } else {
60 // No way to determine our type.name, just return
61 return nil
62 }
63
64 var result []string
65
66 // Always include our own ID. This is primarily for backwards
67 // compatibility with states that didn't yet support the more
68 // specific dep string.
69 result = append(result, id)
70
71 // We also represent the splat form, which covers multi-count access
72 result = append(result, fmt.Sprintf("%s.*", id))
73
74 // We represent either a specific number, or all numbers
75 suffix := "N"
76 if n.Addr != nil {
77 idx := n.Addr.Index
78 if idx == -1 {
79 idx = 0
80 }
81
82 suffix = fmt.Sprintf("%d", idx)
83 }
84 result = append(result, fmt.Sprintf("%s.%s", id, suffix))
85
86 return result
87}
88
89// GraphNodeReferencer
90func (n *NodeAbstractResource) References() []string {
91 // If we have a config, that is our source of truth
92 if c := n.Config; c != nil {
93 // Grab all the references
94 var result []string
95 result = append(result, c.DependsOn...)
96 result = append(result, ReferencesFromConfig(c.RawCount)...)
97 result = append(result, ReferencesFromConfig(c.RawConfig)...)
98 for _, p := range c.Provisioners {
99 if p.When == config.ProvisionerWhenCreate {
100 result = append(result, ReferencesFromConfig(p.ConnInfo)...)
101 result = append(result, ReferencesFromConfig(p.RawConfig)...)
102 }
103 }
104
105 return uniqueStrings(result)
106 }
107
108 // If we have state, that is our next source
109 if s := n.ResourceState; s != nil {
110 return s.Dependencies
111 }
112
113 return nil
114}
115
116// StateReferences returns the dependencies to put into the state for
117// this resource.
118func (n *NodeAbstractResource) StateReferences() []string {
119 self := n.ReferenceableName()
120
121 // Determine what our "prefix" is for checking for references to
122 // ourself.
123 addrCopy := n.Addr.Copy()
124 addrCopy.Index = -1
125 selfPrefix := addrCopy.String() + "."
126
127 depsRaw := n.References()
128 deps := make([]string, 0, len(depsRaw))
129 for _, d := range depsRaw {
130 // Ignore any variable dependencies
131 if strings.HasPrefix(d, "var.") {
132 continue
133 }
134
135 // If this has a backup ref, ignore those for now. The old state
136 // file never contained those and I'd rather store the rich types we
137 // add in the future.
138 if idx := strings.IndexRune(d, '/'); idx != -1 {
139 d = d[:idx]
140 }
141
142 // If we're referencing ourself, then ignore it
143 found := false
144 for _, s := range self {
145 if d == s {
146 found = true
147 }
148 }
149 if found {
150 continue
151 }
152
153 // If this is a reference to ourself and a specific index, we keep
154 // it. For example, if this resource is "foo.bar" and the reference
155 // is "foo.bar.0" then we keep it exact. Otherwise, we strip it.
156 if strings.HasSuffix(d, ".0") && !strings.HasPrefix(d, selfPrefix) {
157 d = d[:len(d)-2]
158 }
159
160 // This is sad. The dependencies are currently in the format of
161 // "module.foo.bar" (the full field). This strips the field off.
162 if strings.HasPrefix(d, "module.") {
163 parts := strings.SplitN(d, ".", 3)
164 d = strings.Join(parts[0:2], ".")
165 }
166
167 deps = append(deps, d)
168 }
169
170 return deps
171}
172
173// GraphNodeProviderConsumer
174func (n *NodeAbstractResource) ProvidedBy() []string {
175 // If we have a config we prefer that above all else
176 if n.Config != nil {
177 return []string{resourceProvider(n.Config.Type, n.Config.Provider)}
178 }
179
180 // If we have state, then we will use the provider from there
181 if n.ResourceState != nil && n.ResourceState.Provider != "" {
182 return []string{n.ResourceState.Provider}
183 }
184
185 // Use our type
186 return []string{resourceProvider(n.Addr.Type, "")}
187}
188
189// GraphNodeProvisionerConsumer
190func (n *NodeAbstractResource) ProvisionedBy() []string {
191 // If we have no configuration, then we have no provisioners
192 if n.Config == nil {
193 return nil
194 }
195
196 // Build the list of provisioners we need based on the configuration.
197 // It is okay to have duplicates here.
198 result := make([]string, len(n.Config.Provisioners))
199 for i, p := range n.Config.Provisioners {
200 result[i] = p.Type
201 }
202
203 return result
204}
205
206// GraphNodeResource, GraphNodeAttachResourceState
207func (n *NodeAbstractResource) ResourceAddr() *ResourceAddress {
208 return n.Addr
209}
210
211// GraphNodeAddressable, TODO: remove, used by target, should unify
212func (n *NodeAbstractResource) ResourceAddress() *ResourceAddress {
213 return n.ResourceAddr()
214}
215
216// GraphNodeTargetable
217func (n *NodeAbstractResource) SetTargets(targets []ResourceAddress) {
218 n.Targets = targets
219}
220
221// GraphNodeAttachResourceState
222func (n *NodeAbstractResource) AttachResourceState(s *ResourceState) {
223 n.ResourceState = s
224}
225
226// GraphNodeAttachResourceConfig
227func (n *NodeAbstractResource) AttachResourceConfig(c *config.Resource) {
228 n.Config = c
229}
230
231// GraphNodeDotter impl.
232func (n *NodeAbstractResource) DotNode(name string, opts *dag.DotOpts) *dag.DotNode {
233 return &dag.DotNode{
234 Name: name,
235 Attrs: map[string]string{
236 "label": n.Name(),
237 "shape": "box",
238 },
239 }
240}
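ReferenceableName above answers to three spellings: the plain id, the splat form, and an indexed form. A simplified sketch of the output for a resource with a known address (the "N" fallback used when no address is attached is omitted here):

package main

import "fmt"

// referenceableNames mirrors the common path of ReferenceableName:
// the id itself, id.*, and id.<index>.
func referenceableNames(id string, index int) []string {
	if index < 0 {
		index = 0 // unindexed resources count as index 0
	}
	return []string{
		id,
		fmt.Sprintf("%s.*", id),
		fmt.Sprintf("%s.%d", id, index),
	}
}

func main() {
	fmt.Println(referenceableNames("aws_instance.web", 2))
	// [aws_instance.web aws_instance.web.* aws_instance.web.2]
}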
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract_count.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract_count.go
new file mode 100644
index 0000000..573570d
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract_count.go
@@ -0,0 +1,50 @@
1package terraform
2
3// NodeAbstractCountResource should be embedded instead of NodeAbstractResource
4// if the resource has a `count` value that needs to be expanded.
5//
6// The embedder should implement `DynamicExpand` to process the count.
7type NodeAbstractCountResource struct {
8 *NodeAbstractResource
9
10 // Validate, if true, will perform the validation for the count.
11 // This should only be turned on for the "validate" operation.
12 Validate bool
13}
14
15// GraphNodeEvalable
16func (n *NodeAbstractCountResource) EvalTree() EvalNode {
17 // We only check if the count is computed if we're not validating.
18 // If we're validating we allow computed counts since they just turn
19 // into more computed values.
20 var evalCountCheckComputed EvalNode
21 if !n.Validate {
22 evalCountCheckComputed = &EvalCountCheckComputed{Resource: n.Config}
23 }
24
25 return &EvalSequence{
26 Nodes: []EvalNode{
27 // The EvalTree for a plannable resource primarily involves
28 // interpolating the count since it can contain variables
29 // we only just received access to.
30 //
31 // With the interpolated count, we can then DynamicExpand
32 // into the proper number of instances.
33 &EvalInterpolate{Config: n.Config.RawCount},
34
35 // Check if the count is computed
36 evalCountCheckComputed,
37
38 // If validation is enabled, perform the validation
39 &EvalIf{
40 If: func(ctx EvalContext) (bool, error) {
41 return n.Validate, nil
42 },
43
44 Then: &EvalValidateCount{Resource: n.Config},
45 },
46
47 &EvalCountFixZeroOneBoundary{Resource: n.Config},
48 },
49 }
50}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_apply.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_apply.go
new file mode 100644
index 0000000..3599782
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_apply.go
@@ -0,0 +1,357 @@
1package terraform
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/terraform/config"
7)
8
9// NodeApplyableResource represents a resource that is "applyable":
10// it is ready to be applied and is represented by a diff.
11type NodeApplyableResource struct {
12 *NodeAbstractResource
13}
14
15// GraphNodeCreator
16func (n *NodeApplyableResource) CreateAddr() *ResourceAddress {
17 return n.NodeAbstractResource.Addr
18}
19
20// GraphNodeReferencer, overriding NodeAbstractResource
21func (n *NodeApplyableResource) References() []string {
22 result := n.NodeAbstractResource.References()
23
24 // The "apply" side of a resource generally also depends on the
25 // destruction of its dependencies as well. For example, if a LB
26 // references a set of VMs with ${vm.foo.*.id}, then we must wait for
27 // the destruction so we get the newly updated list of VMs.
28 //
29 // The exception here is CBD. When CBD is set, we don't do this since
30 // it would create a cycle. By not creating a cycle, we require two
31 // applies since the first apply the creation step will use the OLD
32 // values (pre-destroy) and the second step will update.
33 //
34 // This is how Terraform behaved with "legacy" graphs (TF <= 0.7.x).
35 // We mimic that behavior here now and can improve upon it in the future.
36 //
37 // This behavior is tested in graph_build_apply_test.go to test ordering.
38 cbd := n.Config != nil && n.Config.Lifecycle.CreateBeforeDestroy
39 if !cbd {
40 // The "apply" side of a resource always depends on the destruction
41 // of all its dependencies in addition to the creation.
42 for _, v := range result {
43 result = append(result, v+".destroy")
44 }
45 }
46
47 return result
48}
49
50// GraphNodeEvalable
51func (n *NodeApplyableResource) EvalTree() EvalNode {
52 addr := n.NodeAbstractResource.Addr
53
54 // stateId is the ID to put into the state
55 stateId := addr.stateId()
56
57 // Build the instance info. More of this will be populated during eval
58 info := &InstanceInfo{
59 Id: stateId,
60 Type: addr.Type,
61 }
62
63 // Build the resource for eval
64 resource := &Resource{
65 Name: addr.Name,
66 Type: addr.Type,
67 CountIndex: addr.Index,
68 }
69 if resource.CountIndex < 0 {
70 resource.CountIndex = 0
71 }
72
73 // Determine the dependencies for the state.
74 stateDeps := n.StateReferences()
75
76 // Eval info is different depending on what kind of resource this is
77 switch n.Config.Mode {
78 case config.ManagedResourceMode:
79 return n.evalTreeManagedResource(
80 stateId, info, resource, stateDeps,
81 )
82 case config.DataResourceMode:
83 return n.evalTreeDataResource(
84 stateId, info, resource, stateDeps)
85 default:
86 panic(fmt.Errorf("unsupported resource mode %s", n.Config.Mode))
87 }
88}
89
90func (n *NodeApplyableResource) evalTreeDataResource(
91 stateId string, info *InstanceInfo,
92 resource *Resource, stateDeps []string) EvalNode {
93 var provider ResourceProvider
94 var config *ResourceConfig
95 var diff *InstanceDiff
96 var state *InstanceState
97
98 return &EvalSequence{
99 Nodes: []EvalNode{
100 // Build the instance info
101 &EvalInstanceInfo{
102 Info: info,
103 },
104
105 // Get the saved diff for apply
106 &EvalReadDiff{
107 Name: stateId,
108 Diff: &diff,
109 },
110
111 // Stop here if we don't actually have a diff
112 &EvalIf{
113 If: func(ctx EvalContext) (bool, error) {
114 if diff == nil {
115 return true, EvalEarlyExitError{}
116 }
117
118 if diff.GetAttributesLen() == 0 {
119 return true, EvalEarlyExitError{}
120 }
121
122 return true, nil
123 },
124 Then: EvalNoop{},
125 },
126
127 // We need to re-interpolate the config here, rather than
128 // just using the diff's values directly, because we've
129 // potentially learned more variable values during the
130 // apply pass that weren't known when the diff was produced.
131 &EvalInterpolate{
132 Config: n.Config.RawConfig.Copy(),
133 Resource: resource,
134 Output: &config,
135 },
136
137 &EvalGetProvider{
138 Name: n.ProvidedBy()[0],
139 Output: &provider,
140 },
141
142 // Make a new diff with our newly-interpolated config.
143 &EvalReadDataDiff{
144 Info: info,
145 Config: &config,
146 Previous: &diff,
147 Provider: &provider,
148 Output: &diff,
149 },
150
151 &EvalReadDataApply{
152 Info: info,
153 Diff: &diff,
154 Provider: &provider,
155 Output: &state,
156 },
157
158 &EvalWriteState{
159 Name: stateId,
160 ResourceType: n.Config.Type,
161 Provider: n.Config.Provider,
162 Dependencies: stateDeps,
163 State: &state,
164 },
165
166 // Clear the diff now that we've applied it, so
167 // later nodes won't see a diff that's now a no-op.
168 &EvalWriteDiff{
169 Name: stateId,
170 Diff: nil,
171 },
172
173 &EvalUpdateStateHook{},
174 },
175 }
176}
177
178func (n *NodeApplyableResource) evalTreeManagedResource(
179 stateId string, info *InstanceInfo,
180 resource *Resource, stateDeps []string) EvalNode {
181 // Declare a bunch of variables that are used for state during
182 // evaluation. Most of these are written to by address below.
183 var provider ResourceProvider
184 var diff, diffApply *InstanceDiff
185 var state *InstanceState
186 var resourceConfig *ResourceConfig
187 var err error
188 var createNew bool
189 var createBeforeDestroyEnabled bool
190
191 return &EvalSequence{
192 Nodes: []EvalNode{
193 // Build the instance info
194 &EvalInstanceInfo{
195 Info: info,
196 },
197
198 // Get the saved diff for apply
199 &EvalReadDiff{
200 Name: stateId,
201 Diff: &diffApply,
202 },
203
204 // We don't want to do any destroys
205 &EvalIf{
206 If: func(ctx EvalContext) (bool, error) {
207 if diffApply == nil {
208 return true, EvalEarlyExitError{}
209 }
210
211 if diffApply.GetDestroy() && diffApply.GetAttributesLen() == 0 {
212 return true, EvalEarlyExitError{}
213 }
214
215 diffApply.SetDestroy(false)
216 return true, nil
217 },
218 Then: EvalNoop{},
219 },
220
221 &EvalIf{
222 If: func(ctx EvalContext) (bool, error) {
223 destroy := false
224 if diffApply != nil {
225 destroy = diffApply.GetDestroy() || diffApply.RequiresNew()
226 }
227
228 createBeforeDestroyEnabled =
229 n.Config.Lifecycle.CreateBeforeDestroy &&
230 destroy
231
232 return createBeforeDestroyEnabled, nil
233 },
234 Then: &EvalDeposeState{
235 Name: stateId,
236 },
237 },
238
239 &EvalInterpolate{
240 Config: n.Config.RawConfig.Copy(),
241 Resource: resource,
242 Output: &resourceConfig,
243 },
244 &EvalGetProvider{
245 Name: n.ProvidedBy()[0],
246 Output: &provider,
247 },
248 &EvalReadState{
249 Name: stateId,
250 Output: &state,
251 },
252 // Re-run validation to catch any errors we missed, e.g. type
253 // mismatches on computed values.
254 &EvalValidateResource{
255 Provider: &provider,
256 Config: &resourceConfig,
257 ResourceName: n.Config.Name,
258 ResourceType: n.Config.Type,
259 ResourceMode: n.Config.Mode,
260 IgnoreWarnings: true,
261 },
262 &EvalDiff{
263 Info: info,
264 Config: &resourceConfig,
265 Resource: n.Config,
266 Provider: &provider,
267 Diff: &diffApply,
268 State: &state,
269 OutputDiff: &diffApply,
270 },
271
272 // Get the saved diff
273 &EvalReadDiff{
274 Name: stateId,
275 Diff: &diff,
276 },
277
278 // Compare the diffs
279 &EvalCompareDiff{
280 Info: info,
281 One: &diff,
282 Two: &diffApply,
283 },
284
285 &EvalGetProvider{
286 Name: n.ProvidedBy()[0],
287 Output: &provider,
288 },
289 &EvalReadState{
290 Name: stateId,
291 Output: &state,
292 },
293 // Call pre-apply hook
294 &EvalApplyPre{
295 Info: info,
296 State: &state,
297 Diff: &diffApply,
298 },
299 &EvalApply{
300 Info: info,
301 State: &state,
302 Diff: &diffApply,
303 Provider: &provider,
304 Output: &state,
305 Error: &err,
306 CreateNew: &createNew,
307 },
308 &EvalWriteState{
309 Name: stateId,
310 ResourceType: n.Config.Type,
311 Provider: n.Config.Provider,
312 Dependencies: stateDeps,
313 State: &state,
314 },
315 &EvalApplyProvisioners{
316 Info: info,
317 State: &state,
318 Resource: n.Config,
319 InterpResource: resource,
320 CreateNew: &createNew,
321 Error: &err,
322 When: config.ProvisionerWhenCreate,
323 },
324 &EvalIf{
325 If: func(ctx EvalContext) (bool, error) {
326 return createBeforeDestroyEnabled && err != nil, nil
327 },
328 Then: &EvalUndeposeState{
329 Name: stateId,
330 State: &state,
331 },
332 Else: &EvalWriteState{
333 Name: stateId,
334 ResourceType: n.Config.Type,
335 Provider: n.Config.Provider,
336 Dependencies: stateDeps,
337 State: &state,
338 },
339 },
340
341 // We clear the diff out here so that future nodes
342 // don't see a diff that is already complete. There
343 // is no longer a diff!
344 &EvalWriteDiff{
345 Name: stateId,
346 Diff: nil,
347 },
348
349 &EvalApplyPost{
350 Info: info,
351 State: &state,
352 Error: &err,
353 },
354 &EvalUpdateStateHook{},
355 },
356 }
357}
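The References override at the top of this file encodes the create-before-destroy trade-off: without CBD, creation also waits on each dependency's destroy node. A standalone sketch mirroring that logic:

package main

import "fmt"

// applyReferences mirrors NodeApplyableResource.References: unless
// create-before-destroy is enabled, every reference gains a ".destroy"
// counterpart so creation waits for dependency destruction.
func applyReferences(refs []string, createBeforeDestroy bool) []string {
	result := append([]string(nil), refs...)
	if !createBeforeDestroy {
		for _, v := range refs {
			result = append(result, v+".destroy")
		}
	}
	return result
}

func main() {
	fmt.Println(applyReferences([]string{"aws_instance.vm"}, false))
	// [aws_instance.vm aws_instance.vm.destroy]
	fmt.Println(applyReferences([]string{"aws_instance.vm"}, true))
	// [aws_instance.vm] (no cycle-inducing destroy edge under CBD)
}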
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_destroy.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_destroy.go
new file mode 100644
index 0000000..c2efd2c
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_destroy.go
@@ -0,0 +1,288 @@
1package terraform
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/terraform/config"
7)
8
9// NodeDestroyResource represents a resource that is to be destroyed.
10type NodeDestroyResource struct {
11 *NodeAbstractResource
12}
13
14func (n *NodeDestroyResource) Name() string {
15 return n.NodeAbstractResource.Name() + " (destroy)"
16}
17
18// GraphNodeDestroyer
19func (n *NodeDestroyResource) DestroyAddr() *ResourceAddress {
20 return n.Addr
21}
22
23// GraphNodeDestroyerCBD
24func (n *NodeDestroyResource) CreateBeforeDestroy() bool {
25 // If we have no config, we just assume no
26 if n.Config == nil {
27 return false
28 }
29
30 return n.Config.Lifecycle.CreateBeforeDestroy
31}
32
33// GraphNodeDestroyerCBD
34func (n *NodeDestroyResource) ModifyCreateBeforeDestroy(v bool) error {
35 // If we have no config, do nothing since it won't affect the
36 // create step anyways.
37 if n.Config == nil {
38 return nil
39 }
40
41 // Set CBD to true
42 n.Config.Lifecycle.CreateBeforeDestroy = true
43
44 return nil
45}
46
47// GraphNodeReferenceable, overriding NodeAbstractResource
48func (n *NodeDestroyResource) ReferenceableName() []string {
49 // We modify our referenceable name to have the suffix of ".destroy"
50 // since depending on the creation side doesn't necessarily mean
51 // depending on destruction.
52 suffix := ".destroy"
53
54 // If we're CBD, we also append "-cbd". This is because CBD will set up
55 // its own edges (in CBDEdgeTransformer). Depending on the "destroy"
56 // side generally doesn't mean depending on CBD as well. See GH-11349
57 if n.CreateBeforeDestroy() {
58 suffix += "-cbd"
59 }
60
61 result := n.NodeAbstractResource.ReferenceableName()
62 for i, v := range result {
63 result[i] = v + suffix
64 }
65
66 return result
67}
68
69// GraphNodeReferencer, overriding NodeAbstractResource
70func (n *NodeDestroyResource) References() []string {
71 // If we have a config, then we need to include destroy-time dependencies
72 if c := n.Config; c != nil {
73 var result []string
74 for _, p := range c.Provisioners {
75 // We include conn info and config for destroy time provisioners
76 // as dependencies that we have.
77 if p.When == config.ProvisionerWhenDestroy {
78 result = append(result, ReferencesFromConfig(p.ConnInfo)...)
79 result = append(result, ReferencesFromConfig(p.RawConfig)...)
80 }
81 }
82
83 return result
84 }
85
86 return nil
87}
88
89// GraphNodeDynamicExpandable
90func (n *NodeDestroyResource) DynamicExpand(ctx EvalContext) (*Graph, error) {
91 // If we have no address we do nothing
92 if n.Addr == nil {
93 return nil, nil
94 }
95
96 state, lock := ctx.State()
97 lock.RLock()
98 defer lock.RUnlock()
99
100 // Start creating the steps
101 steps := make([]GraphTransformer, 0, 5)
102
103 // We want deposed resources in the state to be destroyed
104 steps = append(steps, &DeposedTransformer{
105 State: state,
106 View: n.Addr.stateId(),
107 })
108
109 // Target
110 steps = append(steps, &TargetsTransformer{
111 ParsedTargets: n.Targets,
112 })
113
114 // Always end with the root being added
115 steps = append(steps, &RootTransformer{})
116
117 // Build the graph
118 b := &BasicGraphBuilder{
119 Steps: steps,
120 Name: "NodeResourceDestroy",
121 }
122 return b.Build(ctx.Path())
123}
124
125// GraphNodeEvalable
126func (n *NodeDestroyResource) EvalTree() EvalNode {
127 // stateId is the ID to put into the state
128 stateId := n.Addr.stateId()
129
130 // Build the instance info. More of this will be populated during eval
131 info := &InstanceInfo{
132 Id: stateId,
133 Type: n.Addr.Type,
134 uniqueExtra: "destroy",
135 }
136
137 // Build the resource for eval
138 addr := n.Addr
139 resource := &Resource{
140 Name: addr.Name,
141 Type: addr.Type,
142 CountIndex: addr.Index,
143 }
144 if resource.CountIndex < 0 {
145 resource.CountIndex = 0
146 }
147
148 // Get our state
149 rs := n.ResourceState
150 if rs == nil {
151 rs = &ResourceState{}
152 }
153
154 var diffApply *InstanceDiff
155 var provider ResourceProvider
156 var state *InstanceState
157 var err error
158 return &EvalOpFilter{
159 Ops: []walkOperation{walkApply, walkDestroy},
160 Node: &EvalSequence{
161 Nodes: []EvalNode{
162 // Get the saved diff for apply
163 &EvalReadDiff{
164 Name: stateId,
165 Diff: &diffApply,
166 },
167
168 // Filter the diff so we only get the destroy
169 &EvalFilterDiff{
170 Diff: &diffApply,
171 Output: &diffApply,
172 Destroy: true,
173 },
174
175 // If we're not destroying, then exit early since there is nothing to do
176 &EvalIf{
177 If: func(ctx EvalContext) (bool, error) {
178 if diffApply != nil && diffApply.GetDestroy() {
179 return true, nil
180 }
181
182 return true, EvalEarlyExitError{}
183 },
184 Then: EvalNoop{},
185 },
186
187 // Load the instance info so we have the module path set
188 &EvalInstanceInfo{Info: info},
189
190 &EvalGetProvider{
191 Name: n.ProvidedBy()[0],
192 Output: &provider,
193 },
194 &EvalReadState{
195 Name: stateId,
196 Output: &state,
197 },
198 &EvalRequireState{
199 State: &state,
200 },
201
202 // Call pre-apply hook
203 &EvalApplyPre{
204 Info: info,
205 State: &state,
206 Diff: &diffApply,
207 },
208
209 // Run destroy provisioners if not tainted
210 &EvalIf{
211 If: func(ctx EvalContext) (bool, error) {
212 if state != nil && state.Tainted {
213 return false, nil
214 }
215
216 return true, nil
217 },
218
219 Then: &EvalApplyProvisioners{
220 Info: info,
221 State: &state,
222 Resource: n.Config,
223 InterpResource: resource,
224 Error: &err,
225 When: config.ProvisionerWhenDestroy,
226 },
227 },
228
229 // If we have a provisioning error, then we just call
230 // the post-apply hook now.
231 &EvalIf{
232 If: func(ctx EvalContext) (bool, error) {
233 return err != nil, nil
234 },
235
236 Then: &EvalApplyPost{
237 Info: info,
238 State: &state,
239 Error: &err,
240 },
241 },
242
243 // Make sure we handle data sources properly.
244 &EvalIf{
245 If: func(ctx EvalContext) (bool, error) {
246 if n.Addr == nil {
247 return false, fmt.Errorf("nil address")
248 }
249
250 if n.Addr.Mode == config.DataResourceMode {
251 return true, nil
252 }
253
254 return false, nil
255 },
256
257 Then: &EvalReadDataApply{
258 Info: info,
259 Diff: &diffApply,
260 Provider: &provider,
261 Output: &state,
262 },
263 Else: &EvalApply{
264 Info: info,
265 State: &state,
266 Diff: &diffApply,
267 Provider: &provider,
268 Output: &state,
269 Error: &err,
270 },
271 },
272 &EvalWriteState{
273 Name: stateId,
274 ResourceType: n.Addr.Type,
275 Provider: rs.Provider,
276 Dependencies: rs.Dependencies,
277 State: &state,
278 },
279 &EvalApplyPost{
280 Info: info,
281 State: &state,
282 Error: &err,
283 },
284 &EvalUpdateStateHook{},
285 },
286 },
287 }
288}
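Destroy-time provisioners above are skipped for tainted instances, since a tainted instance is not considered fully created. A sketch of just that guard (the struct here is a stand-in for the vendored InstanceState):

package main

import "fmt"

type instanceState struct{ Tainted bool }

// runDestroyProvisioners mirrors the EvalIf guard: destroy provisioners
// run only when the instance is absent from taint tracking or untainted.
func runDestroyProvisioners(s *instanceState) bool {
	if s != nil && s.Tainted {
		return false
	}
	return true
}

func main() {
	fmt.Println(runDestroyProvisioners(&instanceState{Tainted: true}))  // false
	fmt.Println(runDestroyProvisioners(&instanceState{Tainted: false})) // true
}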
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan.go
new file mode 100644
index 0000000..52bbf88
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan.go
@@ -0,0 +1,83 @@
1package terraform
2
3import (
4 "github.com/hashicorp/terraform/dag"
5)
6
7// NodePlannableResource represents a resource that is "plannable":
8// it is ready to be planned in order to create a diff.
9type NodePlannableResource struct {
10 *NodeAbstractCountResource
11}
12
13// GraphNodeDynamicExpandable
14func (n *NodePlannableResource) DynamicExpand(ctx EvalContext) (*Graph, error) {
15 // Grab the state which we read
16 state, lock := ctx.State()
17 lock.RLock()
18 defer lock.RUnlock()
19
20 // Expand the resource count which must be available by now from EvalTree
21 count, err := n.Config.Count()
22 if err != nil {
23 return nil, err
24 }
25
26 // The concrete resource factory we'll use
27 concreteResource := func(a *NodeAbstractResource) dag.Vertex {
28 // Add the config and state since we don't do that via transforms
29 a.Config = n.Config
30
31 return &NodePlannableResourceInstance{
32 NodeAbstractResource: a,
33 }
34 }
35
36 // The concrete resource factory we'll use for orphans
37 concreteResourceOrphan := func(a *NodeAbstractResource) dag.Vertex {
38 // Add the config and state since we don't do that via transforms
39 a.Config = n.Config
40
41 return &NodePlannableResourceOrphan{
42 NodeAbstractResource: a,
43 }
44 }
45
46 // Start creating the steps
47 steps := []GraphTransformer{
48 // Expand the count.
49 &ResourceCountTransformer{
50 Concrete: concreteResource,
51 Count: count,
52 Addr: n.ResourceAddr(),
53 },
54
55 // Add the count orphans
56 &OrphanResourceCountTransformer{
57 Concrete: concreteResourceOrphan,
58 Count: count,
59 Addr: n.ResourceAddr(),
60 State: state,
61 },
62
63 // Attach the state
64 &AttachStateTransformer{State: state},
65
66 // Targeting
67 &TargetsTransformer{ParsedTargets: n.Targets},
68
69 // Connect references so ordering is correct
70 &ReferenceTransformer{},
71
72 // Make sure there is a single root
73 &RootTransformer{},
74 }
75
76 // Build the graph
77 b := &BasicGraphBuilder{
78 Steps: steps,
79 Validate: true,
80 Name: "NodePlannableResource",
81 }
82 return b.Build(ctx.Path())
83}
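OrphanResourceCountTransformer above picks up instances that exist in state beyond the configured count. An illustrative sketch of which indexes become orphans when count shrinks (the transformer itself is not part of this diff, so this only models the idea):

package main

import "fmt"

// orphanIndexes is illustrative: instances whose index is at or beyond
// the new count remain only in state and are planned for destruction.
func orphanIndexes(stateCount, configCount int) []int {
	var orphans []int
	for i := configCount; i < stateCount; i++ {
		orphans = append(orphans, i)
	}
	return orphans
}

func main() {
	fmt.Println(orphanIndexes(5, 3)) // [3 4]
}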
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_destroy.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_destroy.go
new file mode 100644
index 0000000..9b02362
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_destroy.go
@@ -0,0 +1,53 @@
1package terraform
2
3// NodePlanDestroyableResource represents a resource that is "destroyable":
4// its destruction can be planned, producing a destroy diff.
5type NodePlanDestroyableResource struct {
6 *NodeAbstractResource
7}
8
9// GraphNodeDestroyer
10func (n *NodePlanDestroyableResource) DestroyAddr() *ResourceAddress {
11 return n.Addr
12}
13
14// GraphNodeEvalable
15func (n *NodePlanDestroyableResource) EvalTree() EvalNode {
16 addr := n.NodeAbstractResource.Addr
17
18 // stateId is the ID to put into the state
19 stateId := addr.stateId()
20
21 // Build the instance info. More of this will be populated during eval
22 info := &InstanceInfo{
23 Id: stateId,
24 Type: addr.Type,
25 }
26
27 // Declare a bunch of variables that are used for state during
28 // evaluation. Most of these are written to by address below.
29 var diff *InstanceDiff
30 var state *InstanceState
31
32 return &EvalSequence{
33 Nodes: []EvalNode{
34 &EvalReadState{
35 Name: stateId,
36 Output: &state,
37 },
38 &EvalDiffDestroy{
39 Info: info,
40 State: &state,
41 Output: &diff,
42 },
43 &EvalCheckPreventDestroy{
44 Resource: n.Config,
45 Diff: &diff,
46 },
47 &EvalWriteDiff{
48 Name: stateId,
49 Diff: &diff,
50 },
51 },
52 }
53}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_instance.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_instance.go
new file mode 100644
index 0000000..b529569
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_instance.go
@@ -0,0 +1,190 @@
1package terraform
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/terraform/config"
7)
8
9// NodePlannableResourceInstance represents a _single_ resource
10// instance that is plannable. This means this represents a single
11// count index, for example.
12type NodePlannableResourceInstance struct {
13 *NodeAbstractResource
14}
15
16// GraphNodeEvalable
17func (n *NodePlannableResourceInstance) EvalTree() EvalNode {
18 addr := n.NodeAbstractResource.Addr
19
20 // stateId is the ID to put into the state
21 stateId := addr.stateId()
22
23 // Build the instance info. More of this will be populated during eval
24 info := &InstanceInfo{
25 Id: stateId,
26 Type: addr.Type,
27 ModulePath: normalizeModulePath(addr.Path),
28 }
29
30 // Build the resource for eval
31 resource := &Resource{
32 Name: addr.Name,
33 Type: addr.Type,
34 CountIndex: addr.Index,
35 }
36 if resource.CountIndex < 0 {
37 resource.CountIndex = 0
38 }
39
40 // Determine the dependencies for the state.
41 stateDeps := n.StateReferences()
42
43 // Eval info is different depending on what kind of resource this is
44 switch n.Config.Mode {
45 case config.ManagedResourceMode:
46 return n.evalTreeManagedResource(
47 stateId, info, resource, stateDeps,
48 )
49 case config.DataResourceMode:
50 return n.evalTreeDataResource(
51 stateId, info, resource, stateDeps)
52 default:
53 panic(fmt.Errorf("unsupported resource mode %s", n.Config.Mode))
54 }
55}
56
57func (n *NodePlannableResourceInstance) evalTreeDataResource(
58 stateId string, info *InstanceInfo,
59 resource *Resource, stateDeps []string) EvalNode {
60 var provider ResourceProvider
61 var config *ResourceConfig
62 var diff *InstanceDiff
63 var state *InstanceState
64
65 return &EvalSequence{
66 Nodes: []EvalNode{
67 &EvalReadState{
68 Name: stateId,
69 Output: &state,
70 },
71
72 // We need to re-interpolate the config here because some
73 // of the attributes may have become computed during
74 // earlier planning, due to other resources having
75 // "requires new resource" diffs.
76 &EvalInterpolate{
77 Config: n.Config.RawConfig.Copy(),
78 Resource: resource,
79 Output: &config,
80 },
81
82 &EvalIf{
83 If: func(ctx EvalContext) (bool, error) {
84 computed := len(config.ComputedKeys) > 0
85
86 // If the configuration is complete and we
87 // already have a state then we don't need to
88 // do any further work during apply, because we
89 // already populated the state during refresh.
90 if !computed && state != nil {
91 return true, EvalEarlyExitError{}
92 }
93
94 return true, nil
95 },
96 Then: EvalNoop{},
97 },
98
99 &EvalGetProvider{
100 Name: n.ProvidedBy()[0],
101 Output: &provider,
102 },
103
104 &EvalReadDataDiff{
105 Info: info,
106 Config: &config,
107 Provider: &provider,
108 Output: &diff,
109 OutputState: &state,
110 },
111
112 &EvalWriteState{
113 Name: stateId,
114 ResourceType: n.Config.Type,
115 Provider: n.Config.Provider,
116 Dependencies: stateDeps,
117 State: &state,
118 },
119
120 &EvalWriteDiff{
121 Name: stateId,
122 Diff: &diff,
123 },
124 },
125 }
126}
127
128func (n *NodePlannableResourceInstance) evalTreeManagedResource(
129 stateId string, info *InstanceInfo,
130 resource *Resource, stateDeps []string) EvalNode {
131 // Declare a bunch of variables that are used for state during
132 // evaluation. Most of these are written to by address below.
133 var provider ResourceProvider
134 var diff *InstanceDiff
135 var state *InstanceState
136 var resourceConfig *ResourceConfig
137
138 return &EvalSequence{
139 Nodes: []EvalNode{
140 &EvalInterpolate{
141 Config: n.Config.RawConfig.Copy(),
142 Resource: resource,
143 Output: &resourceConfig,
144 },
145 &EvalGetProvider{
146 Name: n.ProvidedBy()[0],
147 Output: &provider,
148 },
149 // Re-run validation to catch any errors we missed, e.g. type
150 // mismatches on computed values.
151 &EvalValidateResource{
152 Provider: &provider,
153 Config: &resourceConfig,
154 ResourceName: n.Config.Name,
155 ResourceType: n.Config.Type,
156 ResourceMode: n.Config.Mode,
157 IgnoreWarnings: true,
158 },
159 &EvalReadState{
160 Name: stateId,
161 Output: &state,
162 },
163 &EvalDiff{
164 Name: stateId,
165 Info: info,
166 Config: &resourceConfig,
167 Resource: n.Config,
168 Provider: &provider,
169 State: &state,
170 OutputDiff: &diff,
171 OutputState: &state,
172 },
173 &EvalCheckPreventDestroy{
174 Resource: n.Config,
175 Diff: &diff,
176 },
177 &EvalWriteState{
178 Name: stateId,
179 ResourceType: n.Config.Type,
180 Provider: n.Config.Provider,
181 Dependencies: stateDeps,
182 State: &state,
183 },
184 &EvalWriteDiff{
185 Name: stateId,
186 Diff: &diff,
187 },
188 },
189 }
190}
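The data-resource branch above exits early when the config is fully known and refresh already wrote state, since there is then nothing left for plan or apply to read. A sketch of that predicate:

package main

import "fmt"

// needsRead mirrors the plan-time condition: a further read is needed
// only when the config still has computed values or no state exists yet.
func needsRead(computed, hasState bool) bool {
	return computed || !hasState
}

func main() {
	fmt.Println(needsRead(false, true)) // false: refresh already populated state
	fmt.Println(needsRead(true, false)) // true: defer the read to apply
}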
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_orphan.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_orphan.go
new file mode 100644
index 0000000..73d6e41
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_orphan.go
@@ -0,0 +1,54 @@
1package terraform
2
3// NodePlannableResourceOrphan represents a resource instance that exists
4// in the state but not in the config: planning it produces a destroy diff.
5type NodePlannableResourceOrphan struct {
6 *NodeAbstractResource
7}
8
9func (n *NodePlannableResourceOrphan) Name() string {
10 return n.NodeAbstractResource.Name() + " (orphan)"
11}
12
13// GraphNodeEvalable
14func (n *NodePlannableResourceOrphan) EvalTree() EvalNode {
15 addr := n.NodeAbstractResource.Addr
16
17 // stateId is the ID to put into the state
18 stateId := addr.stateId()
19
20 // Build the instance info. More of this will be populated during eval
21 info := &InstanceInfo{
22 Id: stateId,
23 Type: addr.Type,
24 ModulePath: normalizeModulePath(addr.Path),
25 }
26
27 // Declare a bunch of variables that are used for state during
28 // evaluation. Most of these are written to by address below.
29 var diff *InstanceDiff
30 var state *InstanceState
31
32 return &EvalSequence{
33 Nodes: []EvalNode{
34 &EvalReadState{
35 Name: stateId,
36 Output: &state,
37 },
38 &EvalDiffDestroy{
39 Info: info,
40 State: &state,
41 Output: &diff,
42 },
43 &EvalCheckPreventDestroy{
44 Resource: n.Config,
45 ResourceId: stateId,
46 Diff: &diff,
47 },
48 &EvalWriteDiff{
49 Name: stateId,
50 Diff: &diff,
51 },
52 },
53 }
54}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_refresh.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_refresh.go
new file mode 100644
index 0000000..3a44926
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_refresh.go
@@ -0,0 +1,100 @@
1package terraform
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/terraform/config"
7)
8
9// NodeRefreshableResource represents a resource that is "refreshable":
10// its state is read and synchronized with the real infrastructure.
11type NodeRefreshableResource struct {
12 *NodeAbstractResource
13}
14
15// GraphNodeDestroyer
16func (n *NodeRefreshableResource) DestroyAddr() *ResourceAddress {
17 return n.Addr
18}
19
20// GraphNodeEvalable
21func (n *NodeRefreshableResource) EvalTree() EvalNode {
22 // Eval info is different depending on what kind of resource this is
23 switch mode := n.Addr.Mode; mode {
24 case config.ManagedResourceMode:
25 return n.evalTreeManagedResource()
26
27 case config.DataResourceMode:
28 // Get the data source node. If we don't have a configuration
29 // then it is an orphan so we destroy it (remove it from the state).
30 var dn GraphNodeEvalable
31 if n.Config != nil {
32 dn = &NodeRefreshableDataResourceInstance{
33 NodeAbstractResource: n.NodeAbstractResource,
34 }
35 } else {
36 dn = &NodeDestroyableDataResource{
37 NodeAbstractResource: n.NodeAbstractResource,
38 }
39 }
40
41 return dn.EvalTree()
42 default:
43 panic(fmt.Errorf("unsupported resource mode %s", mode))
44 }
45}
46
47func (n *NodeRefreshableResource) evalTreeManagedResource() EvalNode {
48 addr := n.NodeAbstractResource.Addr
49
50 // stateId is the ID to put into the state
51 stateId := addr.stateId()
52
53 // Build the instance info. More of this will be populated during eval
54 info := &InstanceInfo{
55 Id: stateId,
56 Type: addr.Type,
57 }
58
59 // Declare a bunch of variables that are used for state during
60 // evaluation. Most of these are written to by address below.
61 var provider ResourceProvider
62 var state *InstanceState
63
64 // A nil ResourceState occurred during initial development. All known
65 // cases were fixed and tested, but as a sanity check let's assert here.
66 if n.ResourceState == nil {
67 err := fmt.Errorf(
68 "No resource state attached for addr: %s\n\n"+
69 "This is a bug. Please report this to Terraform with your configuration\n"+
70 "and state attached. Please be careful to scrub any sensitive information.",
71 addr)
72 return &EvalReturnError{Error: &err}
73 }
74
75 return &EvalSequence{
76 Nodes: []EvalNode{
77 &EvalGetProvider{
78 Name: n.ProvidedBy()[0],
79 Output: &provider,
80 },
81 &EvalReadState{
82 Name: stateId,
83 Output: &state,
84 },
85 &EvalRefresh{
86 Info: info,
87 Provider: &provider,
88 State: &state,
89 Output: &state,
90 },
91 &EvalWriteState{
92 Name: stateId,
93 ResourceType: n.ResourceState.Type,
94 Provider: n.ResourceState.Provider,
95 Dependencies: n.ResourceState.Dependencies,
96 State: &state,
97 },
98 },
99 }
100}
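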
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_validate.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_validate.go
new file mode 100644
index 0000000..f528f24
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_validate.go
@@ -0,0 +1,158 @@
1package terraform
2
3import (
4 "github.com/hashicorp/terraform/dag"
5)
6
7// NodeValidatableResource represents a resource that is used for validation
8// only.
9type NodeValidatableResource struct {
10 *NodeAbstractCountResource
11}
12
13// GraphNodeEvalable
14func (n *NodeValidatableResource) EvalTree() EvalNode {
15 // Ensure we're validating
16 c := n.NodeAbstractCountResource
17 c.Validate = true
18 return c.EvalTree()
19}
20
21// GraphNodeDynamicExpandable
22func (n *NodeValidatableResource) DynamicExpand(ctx EvalContext) (*Graph, error) {
23 // Grab the state which we read
24 state, lock := ctx.State()
25 lock.RLock()
26 defer lock.RUnlock()
27
28 // Expand the resource count which must be available by now from EvalTree
29 count := 1
30 if n.Config.RawCount.Value() != unknownValue() {
31 var err error
32 count, err = n.Config.Count()
33 if err != nil {
34 return nil, err
35 }
36 }
37
38 // The concrete resource factory we'll use
39 concreteResource := func(a *NodeAbstractResource) dag.Vertex {
40 // Add the config and state since we don't do that via transforms
41 a.Config = n.Config
42
43 return &NodeValidatableResourceInstance{
44 NodeAbstractResource: a,
45 }
46 }
47
48 // Start creating the steps
49 steps := []GraphTransformer{
50 // Expand the count.
51 &ResourceCountTransformer{
52 Concrete: concreteResource,
53 Count: count,
54 Addr: n.ResourceAddr(),
55 },
56
57 // Attach the state
58 &AttachStateTransformer{State: state},
59
60 // Targeting
61 &TargetsTransformer{ParsedTargets: n.Targets},
62
63 // Connect references so ordering is correct
64 &ReferenceTransformer{},
65
66 // Make sure there is a single root
67 &RootTransformer{},
68 }
69
70 // Build the graph
71 b := &BasicGraphBuilder{
72 Steps: steps,
73 Validate: true,
74 Name: "NodeValidatableResource",
75 }
76
77 return b.Build(ctx.Path())
78}
79
80// This represents a _single_ resource instance to validate.
81type NodeValidatableResourceInstance struct {
82 *NodeAbstractResource
83}
84
85// GraphNodeEvalable
86func (n *NodeValidatableResourceInstance) EvalTree() EvalNode {
87 addr := n.NodeAbstractResource.Addr
88
89 // Build the resource for eval
90 resource := &Resource{
91 Name: addr.Name,
92 Type: addr.Type,
93 CountIndex: addr.Index,
94 }
95 if resource.CountIndex < 0 {
96 resource.CountIndex = 0
97 }
98
99 // Declare a bunch of variables that are used for state during
100	// evaluation. Most of these are written to by address below.
101 var config *ResourceConfig
102 var provider ResourceProvider
103
104 seq := &EvalSequence{
105 Nodes: []EvalNode{
106 &EvalValidateResourceSelfRef{
107 Addr: &addr,
108 Config: &n.Config.RawConfig,
109 },
110 &EvalGetProvider{
111 Name: n.ProvidedBy()[0],
112 Output: &provider,
113 },
114 &EvalInterpolate{
115 Config: n.Config.RawConfig.Copy(),
116 Resource: resource,
117 Output: &config,
118 },
119 &EvalValidateResource{
120 Provider: &provider,
121 Config: &config,
122 ResourceName: n.Config.Name,
123 ResourceType: n.Config.Type,
124 ResourceMode: n.Config.Mode,
125 },
126 },
127 }
128
129 // Validate all the provisioners
130 for _, p := range n.Config.Provisioners {
131 var provisioner ResourceProvisioner
132 var connConfig *ResourceConfig
133 seq.Nodes = append(
134 seq.Nodes,
135 &EvalGetProvisioner{
136 Name: p.Type,
137 Output: &provisioner,
138 },
139 &EvalInterpolate{
140 Config: p.RawConfig.Copy(),
141 Resource: resource,
142 Output: &config,
143 },
144 &EvalInterpolate{
145 Config: p.ConnInfo.Copy(),
146 Resource: resource,
147 Output: &connConfig,
148 },
149 &EvalValidateProvisioner{
150 Provisioner: &provisioner,
151 Config: &config,
152 ConnConfig: &connConfig,
153 },
154 )
155 }
156
157 return seq
158}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_root_variable.go b/vendor/github.com/hashicorp/terraform/terraform/node_root_variable.go
new file mode 100644
index 0000000..cb61a4e
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_root_variable.go
@@ -0,0 +1,22 @@
1package terraform
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/terraform/config"
7)
8
9// NodeRootVariable represents a root variable input.
10type NodeRootVariable struct {
11 Config *config.Variable
12}
13
14func (n *NodeRootVariable) Name() string {
15 result := fmt.Sprintf("var.%s", n.Config.Name)
16 return result
17}
18
19// GraphNodeReferenceable
20func (n *NodeRootVariable) ReferenceableName() []string {
21 return []string{n.Name()}
22}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/path.go b/vendor/github.com/hashicorp/terraform/terraform/path.go
new file mode 100644
index 0000000..ca99685
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/path.go
@@ -0,0 +1,24 @@
1package terraform
2
3import (
4 "crypto/md5"
5 "encoding/hex"
6)
7
8// PathCacheKey returns a cache key for a module path.
9//
10// TODO: test
11func PathCacheKey(path []string) string {
12 // There is probably a better way to do this, but this is working for now.
13 // We just create an MD5 hash of all the MD5 hashes of all the path
14	// elements. This gives us a key that is unique per path ordering.
15 hash := md5.New()
16 for _, p := range path {
17 single := md5.Sum([]byte(p))
18 if _, err := hash.Write(single[:]); err != nil {
19 panic(err)
20 }
21 }
22
23 return hex.EncodeToString(hash.Sum(nil))
24}
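
Because the key is an MD5 of the per-element MD5s, the same elements in a different order hash to a different key. A standalone sketch of the same scheme (the names here are illustrative, not part of the package):

package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
)

// pathCacheKey mirrors PathCacheKey above: hash each element, then hash
// the concatenation of those fixed-size digests.
func pathCacheKey(path []string) string {
	hash := md5.New()
	for _, p := range path {
		single := md5.Sum([]byte(p))
		hash.Write(single[:]) // a hash.Hash's Write never returns an error
	}
	return hex.EncodeToString(hash.Sum(nil))
}

func main() {
	fmt.Println(pathCacheKey([]string{"root", "network"}))
	fmt.Println(pathCacheKey([]string{"network", "root"})) // different key
}
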
diff --git a/vendor/github.com/hashicorp/terraform/terraform/plan.go b/vendor/github.com/hashicorp/terraform/terraform/plan.go
new file mode 100644
index 0000000..ea08845
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/plan.go
@@ -0,0 +1,153 @@
1package terraform
2
3import (
4 "bytes"
5 "encoding/gob"
6 "errors"
7 "fmt"
8 "io"
9 "sync"
10
11 "github.com/hashicorp/terraform/config/module"
12)
13
14func init() {
15 gob.Register(make([]interface{}, 0))
16 gob.Register(make([]map[string]interface{}, 0))
17 gob.Register(make(map[string]interface{}))
18 gob.Register(make(map[string]string))
19}
20
21// Plan represents a single Terraform execution plan, which contains
22// all the information necessary to make an infrastructure change.
23//
24// A plan has to contain basically the entire state of the world
25// necessary to make a change: the state, diff, config, backend config, etc.
26// This is so that it can run alone without any other data.
27type Plan struct {
28 Diff *Diff
29 Module *module.Tree
30 State *State
31 Vars map[string]interface{}
32 Targets []string
33
34 // Backend is the backend that this plan should use and store data with.
35 Backend *BackendState
36
37 once sync.Once
38}
39
40// Context returns a Context with the data encapsulated in this plan.
41//
42// The following fields in opts are overridden by the plan: Config,
43// Diff, State, Variables.
44func (p *Plan) Context(opts *ContextOpts) (*Context, error) {
45 opts.Diff = p.Diff
46 opts.Module = p.Module
47 opts.State = p.State
48 opts.Targets = p.Targets
49
50 opts.Variables = make(map[string]interface{})
51 for k, v := range p.Vars {
52 opts.Variables[k] = v
53 }
54
55 return NewContext(opts)
56}
57
58func (p *Plan) String() string {
59 buf := new(bytes.Buffer)
60 buf.WriteString("DIFF:\n\n")
61 buf.WriteString(p.Diff.String())
62 buf.WriteString("\n\nSTATE:\n\n")
63 buf.WriteString(p.State.String())
64 return buf.String()
65}
66
67func (p *Plan) init() {
68 p.once.Do(func() {
69 if p.Diff == nil {
70 p.Diff = new(Diff)
71 p.Diff.init()
72 }
73
74 if p.State == nil {
75 p.State = new(State)
76 p.State.init()
77 }
78
79 if p.Vars == nil {
80 p.Vars = make(map[string]interface{})
81 }
82 })
83}
84
85	// The format byte is prefixed to the plan file so that we retain the
86	// ability to change the file format in the future if we need to, for
87	// any reason.
88const planFormatMagic = "tfplan"
89const planFormatVersion byte = 1
90
91// ReadPlan reads a plan structure out of a reader in the format that
92// was written by WritePlan.
93func ReadPlan(src io.Reader) (*Plan, error) {
94 var result *Plan
95 var err error
96 n := 0
97
98	// Verify the magic bytes; io.ReadFull retries partial reads until the
99	// buffer is full or an error occurs.
100	magic := make([]byte, len(planFormatMagic))
101	if _, err = io.ReadFull(src, magic); err != nil {
102		return nil, fmt.Errorf("error while reading magic bytes: %s", err)
103	}
106 if string(magic) != planFormatMagic {
107 return nil, fmt.Errorf("not a valid plan file")
108 }
109
110 // Verify the version is something we can read
111 var formatByte [1]byte
112 n, err = src.Read(formatByte[:])
113 if err != nil {
114 return nil, err
115 }
116 if n != len(formatByte) {
117 return nil, errors.New("failed to read plan version byte")
118 }
119
120 if formatByte[0] != planFormatVersion {
121 return nil, fmt.Errorf("unknown plan file version: %d", formatByte[0])
122 }
123
124 dec := gob.NewDecoder(src)
125 if err := dec.Decode(&result); err != nil {
126 return nil, err
127 }
128
129 return result, nil
130}
131
132// WritePlan writes a plan somewhere in a binary format.
133func WritePlan(d *Plan, dst io.Writer) error {
134 // Write the magic bytes so we can determine the file format later
135 n, err := dst.Write([]byte(planFormatMagic))
136 if err != nil {
137 return err
138 }
139 if n != len(planFormatMagic) {
140 return errors.New("failed to write plan format magic bytes")
141 }
142
143 // Write a version byte so we can iterate on version at some point
144 n, err = dst.Write([]byte{planFormatVersion})
145 if err != nil {
146 return err
147 }
148 if n != 1 {
149 return errors.New("failed to write plan version byte")
150 }
151
152 return gob.NewEncoder(dst).Encode(d)
153}
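
A round trip through WritePlan and ReadPlan, sketched as it might appear in a test in this package (the plan contents are made up, and bytes and fmt are assumed imported):

func ExamplePlanRoundTrip() {
	p := &Plan{
		Vars:    map[string]interface{}{"region": "us-east-1"},
		Targets: []string{"aws_instance.web"},
	}

	// Serialize: magic bytes, version byte, then the gob-encoded Plan.
	var buf bytes.Buffer
	if err := WritePlan(p, &buf); err != nil {
		panic(err)
	}

	read, err := ReadPlan(&buf)
	if err != nil {
		panic(err)
	}

	fmt.Println(read.Vars["region"])
	// Output: us-east-1
}
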
diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource.go b/vendor/github.com/hashicorp/terraform/terraform/resource.go
new file mode 100644
index 0000000..0acf0be
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/resource.go
@@ -0,0 +1,360 @@
1package terraform
2
3import (
4 "fmt"
5 "reflect"
6 "sort"
7 "strconv"
8 "strings"
9
10 "github.com/hashicorp/terraform/config"
11 "github.com/mitchellh/copystructure"
12 "github.com/mitchellh/reflectwalk"
13)
14
15// ResourceProvisionerConfig is used to pair a provisioner
16// with its provided configuration. This allows us to use singleton
17// instances of each ResourceProvisioner and to keep the relevant
18// configuration instead of instantiating a new Provisioner for each
19// resource.
20type ResourceProvisionerConfig struct {
21 Type string
22 Provisioner ResourceProvisioner
23 Config *ResourceConfig
24 RawConfig *config.RawConfig
25 ConnInfo *config.RawConfig
26}
27
28// Resource encapsulates a resource, its configuration, its provider,
29// its current state, and potentially a desired diff from the state it
30// wants to reach.
31type Resource struct {
32 // These are all used by the new EvalNode stuff.
33 Name string
34 Type string
35 CountIndex int
36
37 // These aren't really used anymore anywhere, but we keep them around
38 // since we haven't done a proper cleanup yet.
39 Id string
40 Info *InstanceInfo
41 Config *ResourceConfig
42 Dependencies []string
43 Diff *InstanceDiff
44 Provider ResourceProvider
45 State *InstanceState
46 Provisioners []*ResourceProvisionerConfig
47 Flags ResourceFlag
48}
49
50// ResourceFlag specifies what kind of instance we're working with, whether
51// it's a primary instance, a tainted instance, or an orphan.
52type ResourceFlag byte
53
54// InstanceInfo is used to hold information about the instance and/or
55// resource being modified.
56type InstanceInfo struct {
57 // Id is a unique name to represent this instance. This is not related
58 // to InstanceState.ID in any way.
59 Id string
60
61 // ModulePath is the complete path of the module containing this
62 // instance.
63 ModulePath []string
64
65 // Type is the resource type of this instance
66 Type string
67
68 // uniqueExtra is an internal field that can be populated to supply
69 // extra metadata that is used to identify a unique instance in
70 // the graph walk. This will be appended to HumanID when uniqueId
71 // is called.
72 uniqueExtra string
73}
74
75// HumanId is a unique Id that is human-friendly and useful for UI elements.
76func (i *InstanceInfo) HumanId() string {
77 if i == nil {
78 return "<nil>"
79 }
80
81 if len(i.ModulePath) <= 1 {
82 return i.Id
83 }
84
85 return fmt.Sprintf(
86 "module.%s.%s",
87 strings.Join(i.ModulePath[1:], "."),
88 i.Id)
89}
90
91func (i *InstanceInfo) uniqueId() string {
92 prefix := i.HumanId()
93 if v := i.uniqueExtra; v != "" {
94 prefix += " " + v
95 }
96
97 return prefix
98}
99
100// ResourceConfig holds the configuration given for a resource. This is
101// done instead of a raw `map[string]interface{}` type so that rich
102// methods can be added to it to make dealing with it easier.
103type ResourceConfig struct {
104 ComputedKeys []string
105 Raw map[string]interface{}
106 Config map[string]interface{}
107
108 raw *config.RawConfig
109}
110
111// NewResourceConfig creates a new ResourceConfig from a config.RawConfig.
112func NewResourceConfig(c *config.RawConfig) *ResourceConfig {
113 result := &ResourceConfig{raw: c}
114 result.interpolateForce()
115 return result
116}
117
118// DeepCopy performs a deep copy of the configuration. This makes it safe
119// to modify any of the structures that are part of the resource config without
120// affecting the original configuration.
121func (c *ResourceConfig) DeepCopy() *ResourceConfig {
122 // DeepCopying a nil should return a nil to avoid panics
123 if c == nil {
124 return nil
125 }
126
127 // Copy, this will copy all the exported attributes
128 copy, err := copystructure.Config{Lock: true}.Copy(c)
129 if err != nil {
130 panic(err)
131 }
132
133 // Force the type
134 result := copy.(*ResourceConfig)
135
136 // For the raw configuration, we can just use its own copy method
137 result.raw = c.raw.Copy()
138
139 return result
140}
141
142// Equal checks the equality of two resource configs.
143func (c *ResourceConfig) Equal(c2 *ResourceConfig) bool {
144 // If either are nil, then they're only equal if they're both nil
145 if c == nil || c2 == nil {
146 return c == c2
147 }
148
149 // Sort the computed keys so they're deterministic
150 sort.Strings(c.ComputedKeys)
151 sort.Strings(c2.ComputedKeys)
152
153	// Two resource configs are equal if their exported properties are equal.
154 // We don't compare "raw" because it is never used again after
155 // initialization and for all intents and purposes they are equal
156 // if the exported properties are equal.
157 check := [][2]interface{}{
158 {c.ComputedKeys, c2.ComputedKeys},
159 {c.Raw, c2.Raw},
160 {c.Config, c2.Config},
161 }
162 for _, pair := range check {
163 if !reflect.DeepEqual(pair[0], pair[1]) {
164 return false
165 }
166 }
167
168 return true
169}
170
171// CheckSet checks that the given list of configuration keys is
172// properly set. If not, errors are returned for each unset key.
173//
174// This is useful to be called in the Validate method of a ResourceProvider.
175func (c *ResourceConfig) CheckSet(keys []string) []error {
176 var errs []error
177
178 for _, k := range keys {
179 if !c.IsSet(k) {
180 errs = append(errs, fmt.Errorf("%s must be set", k))
181 }
182 }
183
184 return errs
185}
186
187// Get looks up a configuration value by key and returns the value.
188//
189// The second return value is true if the get was successful. Get will
190// return the raw value if the key is computed, so you should pair this
191// with IsComputed.
192func (c *ResourceConfig) Get(k string) (interface{}, bool) {
193 // We aim to get a value from the configuration. If it is computed,
194 // then we return the pure raw value.
195 source := c.Config
196 if c.IsComputed(k) {
197 source = c.Raw
198 }
199
200 return c.get(k, source)
201}
202
203// GetRaw looks up a configuration value by key and returns the value,
204// from the raw, uninterpolated config.
205//
206// The second return value is true if the get was successful. Get will
207// not succeed if the value is being computed.
208func (c *ResourceConfig) GetRaw(k string) (interface{}, bool) {
209 return c.get(k, c.Raw)
210}
211
212// IsComputed returns whether the given key is computed or not.
213func (c *ResourceConfig) IsComputed(k string) bool {
214	// Check the interpolated config to see whether we get a computed
215	// value out of it.
216 v, ok := c.get(k, c.Config)
217 if !ok {
218 return false
219 }
220
221 // If value is nil, then it isn't computed
222 if v == nil {
223 return false
224 }
225
226 // Test if the value contains an unknown value
227 var w unknownCheckWalker
228 if err := reflectwalk.Walk(v, &w); err != nil {
229 panic(err)
230 }
231
232 return w.Unknown
233}
234
235// IsSet checks if the key in the configuration is set. A key is set if
236// it has a value or the value is being computed (is unknown currently).
237//
238// This function should be used rather than checking the keys of the
239// raw configuration itself, since a key may be omitted from the raw
240// configuration if it is being computed.
241func (c *ResourceConfig) IsSet(k string) bool {
242 if c == nil {
243 return false
244 }
245
246 if c.IsComputed(k) {
247 return true
248 }
249
250 if _, ok := c.Get(k); ok {
251 return true
252 }
253
254 return false
255}
256
257func (c *ResourceConfig) get(
258 k string, raw map[string]interface{}) (interface{}, bool) {
259 parts := strings.Split(k, ".")
260 if len(parts) == 1 && parts[0] == "" {
261 parts = nil
262 }
263
264 var current interface{} = raw
265 var previous interface{} = nil
266 for i, part := range parts {
267 if current == nil {
268 return nil, false
269 }
270
271 cv := reflect.ValueOf(current)
272 switch cv.Kind() {
273 case reflect.Map:
274 previous = current
275 v := cv.MapIndex(reflect.ValueOf(part))
276 if !v.IsValid() {
277 if i > 0 && i != (len(parts)-1) {
278 tryKey := strings.Join(parts[i:], ".")
279 v := cv.MapIndex(reflect.ValueOf(tryKey))
280 if !v.IsValid() {
281 return nil, false
282 }
283
284 return v.Interface(), true
285 }
286
287 return nil, false
288 }
289
290 current = v.Interface()
291 case reflect.Slice:
292 previous = current
293
294 if part == "#" {
295 // If any value in a list is computed, this whole thing
296 // is computed and we can't read any part of it.
297 for i := 0; i < cv.Len(); i++ {
298 if v := cv.Index(i).Interface(); v == unknownValue() {
299 return v, true
300 }
301 }
302
303 current = cv.Len()
304 } else {
305 i, err := strconv.ParseInt(part, 0, 0)
306 if err != nil {
307 return nil, false
308 }
309 if i >= int64(cv.Len()) {
310 return nil, false
311 }
312 current = cv.Index(int(i)).Interface()
313 }
314 case reflect.String:
315 // This happens when map keys contain "." and have a common
316 // prefix so were split as path components above.
317 actualKey := strings.Join(parts[i-1:], ".")
318 if prevMap, ok := previous.(map[string]interface{}); ok {
319 v, ok := prevMap[actualKey]
320 return v, ok
321 }
322
323 return nil, false
324 default:
325 panic(fmt.Sprintf("Unknown kind: %s", cv.Kind()))
326 }
327 }
328
329 return current, true
330}
331
332// interpolateForce is a temporary thing. We want to get rid of interpolate
333// above and likewise this, but it can only be done after the f-ast-graph
334// refactor is complete.
335func (c *ResourceConfig) interpolateForce() {
336 if c.raw == nil {
337 var err error
338 c.raw, err = config.NewRawConfig(make(map[string]interface{}))
339 if err != nil {
340 panic(err)
341 }
342 }
343
344 c.ComputedKeys = c.raw.UnknownKeys()
345 c.Raw = c.raw.RawMap()
346 c.Config = c.raw.Config()
347}
348
349// unknownCheckWalker
350type unknownCheckWalker struct {
351 Unknown bool
352}
353
354func (w *unknownCheckWalker) Primitive(v reflect.Value) error {
355 if v.Interface() == unknownValue() {
356 w.Unknown = true
357 }
358
359 return nil
360}
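
A sketch of constructing and querying a ResourceConfig, using config.NewRawConfig the same way interpolateForce does above (fmt and the config package are assumed imported; the keys are illustrative):

raw, err := config.NewRawConfig(map[string]interface{}{
	"ami": "ami-abc123",
	"tags": map[string]interface{}{
		"Name": "web",
	},
})
if err != nil {
	panic(err)
}

rc := NewResourceConfig(raw)

// Dotted keys traverse nested maps via the get method above.
v, ok := rc.Get("tags.Name")
fmt.Println(v, ok)                // web true
fmt.Println(rc.IsSet("ami"))      // true
fmt.Println(rc.IsComputed("ami")) // false: no unknown values here
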
diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource_address.go b/vendor/github.com/hashicorp/terraform/terraform/resource_address.go
new file mode 100644
index 0000000..a8a0c95
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/resource_address.go
@@ -0,0 +1,301 @@
1package terraform
2
3import (
4 "fmt"
5 "reflect"
6 "regexp"
7 "strconv"
8 "strings"
9
10 "github.com/hashicorp/terraform/config"
11)
12
13// ResourceAddress is a way of identifying an individual resource (or,
14// eventually, a subset of resources) within the state. It is used for Targets.
15type ResourceAddress struct {
16 // Addresses a resource falling somewhere in the module path
17 // When specified alone, addresses all resources within a module path
18 Path []string
19
20 // Addresses a specific resource that occurs in a list
21 Index int
22
23 InstanceType InstanceType
24 InstanceTypeSet bool
25 Name string
26 Type string
27 Mode config.ResourceMode // significant only if InstanceTypeSet
28}
29
30// Copy returns a copy of this ResourceAddress
31func (r *ResourceAddress) Copy() *ResourceAddress {
32 if r == nil {
33 return nil
34 }
35
36 n := &ResourceAddress{
37 Path: make([]string, 0, len(r.Path)),
38 Index: r.Index,
39 InstanceType: r.InstanceType,
40 Name: r.Name,
41 Type: r.Type,
42 Mode: r.Mode,
43 }
44 for _, p := range r.Path {
45 n.Path = append(n.Path, p)
46 }
47 return n
48}
49
50// String outputs the address that parses into this address.
51func (r *ResourceAddress) String() string {
52 var result []string
53 for _, p := range r.Path {
54 result = append(result, "module", p)
55 }
56
57 switch r.Mode {
58 case config.ManagedResourceMode:
59 // nothing to do
60 case config.DataResourceMode:
61 result = append(result, "data")
62 default:
63 panic(fmt.Errorf("unsupported resource mode %s", r.Mode))
64 }
65
66 if r.Type != "" {
67 result = append(result, r.Type)
68 }
69
70 if r.Name != "" {
71 name := r.Name
72 if r.InstanceTypeSet {
73 switch r.InstanceType {
74 case TypePrimary:
75 name += ".primary"
76 case TypeDeposed:
77 name += ".deposed"
78 case TypeTainted:
79 name += ".tainted"
80 }
81 }
82
83 if r.Index >= 0 {
84 name += fmt.Sprintf("[%d]", r.Index)
85 }
86 result = append(result, name)
87 }
88
89 return strings.Join(result, ".")
90}
91
92// stateId returns the ID that this resource should be entered with
93// in the state. This is also used for diffs. In the future, we'd like to
94// move away from this string field so I don't export this.
95func (r *ResourceAddress) stateId() string {
96 result := fmt.Sprintf("%s.%s", r.Type, r.Name)
97 switch r.Mode {
98 case config.ManagedResourceMode:
99 // Done
100 case config.DataResourceMode:
101 result = fmt.Sprintf("data.%s", result)
102 default:
103 panic(fmt.Errorf("unknown resource mode: %s", r.Mode))
104 }
105 if r.Index >= 0 {
106 result += fmt.Sprintf(".%d", r.Index)
107 }
108
109 return result
110}
111
112// parseResourceAddressConfig creates a resource address from a config.Resource
113func parseResourceAddressConfig(r *config.Resource) (*ResourceAddress, error) {
114 return &ResourceAddress{
115 Type: r.Type,
116 Name: r.Name,
117 Index: -1,
118 InstanceType: TypePrimary,
119 Mode: r.Mode,
120 }, nil
121}
122
123// parseResourceAddressInternal parses the somewhat bespoke resource
124// identifier used in states and diffs, such as "instance.name.0".
125func parseResourceAddressInternal(s string) (*ResourceAddress, error) {
126 // Split based on ".". Every resource address should have at least two
127 // elements (type and name).
128 parts := strings.Split(s, ".")
129 if len(parts) < 2 || len(parts) > 4 {
130 return nil, fmt.Errorf("Invalid internal resource address format: %s", s)
131 }
132
133 // Data resource if we have at least 3 parts and the first one is data
134 mode := config.ManagedResourceMode
135 if len(parts) > 2 && parts[0] == "data" {
136 mode = config.DataResourceMode
137 parts = parts[1:]
138 }
139
140 // If we're not a data resource and we have more than 3, then it is an error
141 if len(parts) > 3 && mode != config.DataResourceMode {
142 return nil, fmt.Errorf("Invalid internal resource address format: %s", s)
143 }
144
145 // Build the parts of the resource address that are guaranteed to exist
146 addr := &ResourceAddress{
147 Type: parts[0],
148 Name: parts[1],
149 Index: -1,
150 InstanceType: TypePrimary,
151 Mode: mode,
152 }
153
154 // If we have more parts, then we have an index. Parse that.
155 if len(parts) > 2 {
156 idx, err := strconv.ParseInt(parts[2], 0, 0)
157 if err != nil {
158 return nil, fmt.Errorf("Error parsing resource address %q: %s", s, err)
159 }
160
161 addr.Index = int(idx)
162 }
163
164 return addr, nil
165}
166
167func ParseResourceAddress(s string) (*ResourceAddress, error) {
168 matches, err := tokenizeResourceAddress(s)
169 if err != nil {
170 return nil, err
171 }
172 mode := config.ManagedResourceMode
173 if matches["data_prefix"] != "" {
174 mode = config.DataResourceMode
175 }
176 resourceIndex, err := ParseResourceIndex(matches["index"])
177 if err != nil {
178 return nil, err
179 }
180 instanceType, err := ParseInstanceType(matches["instance_type"])
181 if err != nil {
182 return nil, err
183 }
184 path := ParseResourcePath(matches["path"])
185
186 // not allowed to say "data." without a type following
187 if mode == config.DataResourceMode && matches["type"] == "" {
188 return nil, fmt.Errorf("must target specific data instance")
189 }
190
191 return &ResourceAddress{
192 Path: path,
193 Index: resourceIndex,
194 InstanceType: instanceType,
195 InstanceTypeSet: matches["instance_type"] != "",
196 Name: matches["name"],
197 Type: matches["type"],
198 Mode: mode,
199 }, nil
200}
201
202func (addr *ResourceAddress) Equals(raw interface{}) bool {
203 other, ok := raw.(*ResourceAddress)
204 if !ok {
205 return false
206 }
207
208 pathMatch := len(addr.Path) == 0 && len(other.Path) == 0 ||
209 reflect.DeepEqual(addr.Path, other.Path)
210
211 indexMatch := addr.Index == -1 ||
212 other.Index == -1 ||
213 addr.Index == other.Index
214
215 nameMatch := addr.Name == "" ||
216 other.Name == "" ||
217 addr.Name == other.Name
218
219 typeMatch := addr.Type == "" ||
220 other.Type == "" ||
221 addr.Type == other.Type
222
223 // mode is significant only when type is set
224 modeMatch := addr.Type == "" ||
225 other.Type == "" ||
226 addr.Mode == other.Mode
227
228 return pathMatch &&
229 indexMatch &&
230 addr.InstanceType == other.InstanceType &&
231 nameMatch &&
232 typeMatch &&
233 modeMatch
234}
235
236func ParseResourceIndex(s string) (int, error) {
237 if s == "" {
238 return -1, nil
239 }
240 return strconv.Atoi(s)
241}
242
243func ParseResourcePath(s string) []string {
244 if s == "" {
245 return nil
246 }
247 parts := strings.Split(s, ".")
248 path := make([]string, 0, len(parts))
249 for _, s := range parts {
250 // Due to the limitations of the regexp match below, the path match has
251 // some noise in it we have to filter out :|
252 if s == "" || s == "module" {
253 continue
254 }
255 path = append(path, s)
256 }
257 return path
258}
259
260func ParseInstanceType(s string) (InstanceType, error) {
261 switch s {
262 case "", "primary":
263 return TypePrimary, nil
264 case "deposed":
265 return TypeDeposed, nil
266 case "tainted":
267 return TypeTainted, nil
268 default:
269 return TypeInvalid, fmt.Errorf("Unexpected value for InstanceType field: %q", s)
270 }
271}
272
273func tokenizeResourceAddress(s string) (map[string]string, error) {
274 // Example of portions of the regexp below using the
275 // string "aws_instance.web.tainted[1]"
276 re := regexp.MustCompile(`\A` +
277 // "module.foo.module.bar" (optional)
278 `(?P<path>(?:module\.[^.]+\.?)*)` +
279 // possibly "data.", if targeting is a data resource
280 `(?P<data_prefix>(?:data\.)?)` +
281 // "aws_instance.web" (optional when module path specified)
282 `(?:(?P<type>[^.]+)\.(?P<name>[^.[]+))?` +
283 // "tainted" (optional, omission implies: "primary")
284 `(?:\.(?P<instance_type>\w+))?` +
285 // "1" (optional, omission implies: "0")
286 `(?:\[(?P<index>\d+)\])?` +
287 `\z`)
288
289 groupNames := re.SubexpNames()
290 rawMatches := re.FindAllStringSubmatch(s, -1)
291 if len(rawMatches) != 1 {
292 return nil, fmt.Errorf("Problem parsing address: %q", s)
293 }
294
295 matches := make(map[string]string)
296 for i, m := range rawMatches[0] {
297 matches[groupNames[i]] = m
298 }
299
300 return matches, nil
301}
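
Putting the tokenizer and parsers together, a sketch of how a typical target address breaks down (the field values follow from the regexp groups above; fmt assumed imported):

addr, err := ParseResourceAddress("module.foo.data.aws_ami.ubuntu[2]")
if err != nil {
	panic(err)
}

// addr.Path:         []string{"foo"}
// addr.Mode:         config.DataResourceMode
// addr.Type:         "aws_ami"
// addr.Name:         "ubuntu"
// addr.Index:        2
// addr.InstanceType: TypePrimary (InstanceTypeSet is false)

fmt.Println(addr.String()) // module.foo.data.aws_ami.ubuntu[2]
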
diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource_provider.go b/vendor/github.com/hashicorp/terraform/terraform/resource_provider.go
new file mode 100644
index 0000000..1a68c86
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/resource_provider.go
@@ -0,0 +1,204 @@
1package terraform
2
3// ResourceProvider is an interface that must be implemented by any
4// resource provider: the thing that creates and manages the resources in
5// a Terraform configuration.
6//
7// Important implementation note: All returned pointers, such as
8// *ResourceConfig, *InstanceState, *InstanceDiff, etc. must not point to
9// shared data. Terraform is highly parallel and assumes that this data is safe
10// to read/write in parallel, so they must be unique references. Note that it is
11// safe to return arguments as results, however.
12type ResourceProvider interface {
13 /*********************************************************************
14 * Functions related to the provider
15 *********************************************************************/
16
17	// Input is called to ask the provider to prompt the user for input
18	// needed to complete the configuration, if necessary.
19 //
20 // This may or may not be called, so resource provider writers shouldn't
21 // rely on this being available to set some default values for validate
22 // later. Example of a situation where this wouldn't be called is if
23 // the user is not using a TTY.
24 Input(UIInput, *ResourceConfig) (*ResourceConfig, error)
25
26 // Validate is called once at the beginning with the raw configuration
27 // (no interpolation done) and can return a list of warnings and/or
28 // errors.
29 //
30 // This is called once with the provider configuration only. It may not
31 // be called at all if no provider configuration is given.
32 //
33 // This should not assume that any values of the configurations are valid.
34 // The primary use case of this call is to check that required keys are
35 // set.
36 Validate(*ResourceConfig) ([]string, []error)
37
38 // Configure configures the provider itself with the configuration
39 // given. This is useful for setting things like access keys.
40 //
41 // This won't be called at all if no provider configuration is given.
42 //
43 // Configure returns an error if it occurred.
44 Configure(*ResourceConfig) error
45
46 // Resources returns all the available resource types that this provider
47 // knows how to manage.
48 Resources() []ResourceType
49
50 // Stop is called when the provider should halt any in-flight actions.
51 //
52 // This can be used to make a nicer Ctrl-C experience for Terraform.
53 // Even if this isn't implemented to do anything (just returns nil),
54 // Terraform will still cleanly stop after the currently executing
55 // graph node is complete. However, this API can be used to make more
56 // efficient halts.
57 //
58 // Stop doesn't have to and shouldn't block waiting for in-flight actions
59	// to complete. It should take any action it wants and return immediately,
60	// acknowledging it has received the stop request. Terraform core will
61	// make no further API calls to the provider once the currently
62	// executing graph nodes are complete.
64 //
65 // The error returned, if non-nil, is assumed to mean that signaling the
66 // stop somehow failed and that the user should expect potentially waiting
67 // a longer period of time.
68 Stop() error
69
70 /*********************************************************************
71 * Functions related to individual resources
72 *********************************************************************/
73
74 // ValidateResource is called once at the beginning with the raw
75 // configuration (no interpolation done) and can return a list of warnings
76 // and/or errors.
77 //
78 // This is called once per resource.
79 //
80 // This should not assume any of the values in the resource configuration
81 // are valid since it is possible they have to be interpolated still.
82 // The primary use case of this call is to check that the required keys
83 // are set and that the general structure is correct.
84 ValidateResource(string, *ResourceConfig) ([]string, []error)
85
86 // Apply applies a diff to a specific resource and returns the new
87 // resource state along with an error.
88 //
89 // If the resource state given has an empty ID, then a new resource
90 // is expected to be created.
91 Apply(
92 *InstanceInfo,
93 *InstanceState,
94 *InstanceDiff) (*InstanceState, error)
95
96 // Diff diffs a resource versus a desired state and returns
97 // a diff.
98 Diff(
99 *InstanceInfo,
100 *InstanceState,
101 *ResourceConfig) (*InstanceDiff, error)
102
103 // Refresh refreshes a resource and updates all of its attributes
104 // with the latest information.
105 Refresh(*InstanceInfo, *InstanceState) (*InstanceState, error)
106
107 /*********************************************************************
108 * Functions related to importing
109 *********************************************************************/
110
111 // ImportState requests that the given resource be imported.
112 //
113	// The returned InstanceState only requires the ID to be set. Importing
114	// will always call Refresh afterwards to complete the state.
115 //
116 // IMPORTANT: InstanceState doesn't have the resource type attached
117 // to it. A type must be specified on the state via the Ephemeral
118 // field on the state.
119 //
120 // This function can return multiple states. Normally, an import
121 // will map 1:1 to a physical resource. However, some resources map
122 // to multiple. For example, an AWS security group may contain many rules.
123 // Each rule is represented by a separate resource in Terraform,
124 // therefore multiple states are returned.
125 ImportState(*InstanceInfo, string) ([]*InstanceState, error)
126
127 /*********************************************************************
128 * Functions related to data resources
129 *********************************************************************/
130
131 // ValidateDataSource is called once at the beginning with the raw
132 // configuration (no interpolation done) and can return a list of warnings
133 // and/or errors.
134 //
135 // This is called once per data source instance.
136 //
137 // This should not assume any of the values in the resource configuration
138 // are valid since it is possible they have to be interpolated still.
139 // The primary use case of this call is to check that the required keys
140 // are set and that the general structure is correct.
141 ValidateDataSource(string, *ResourceConfig) ([]string, []error)
142
143 // DataSources returns all of the available data sources that this
144 // provider implements.
145 DataSources() []DataSource
146
147 // ReadDataDiff produces a diff that represents the state that will
148 // be produced when the given data source is read using a later call
149 // to ReadDataApply.
150 ReadDataDiff(*InstanceInfo, *ResourceConfig) (*InstanceDiff, error)
151
152 // ReadDataApply initializes a data instance using the configuration
153 // in a diff produced by ReadDataDiff.
154 ReadDataApply(*InstanceInfo, *InstanceDiff) (*InstanceState, error)
155}
156
157// ResourceProviderCloser is an interface that providers that can close
158// connections that aren't needed anymore must implement.
159type ResourceProviderCloser interface {
160 Close() error
161}
162
163// ResourceType is a type of resource that a resource provider can manage.
164type ResourceType struct {
165 Name string // Name of the resource, example "instance" (no provider prefix)
166 Importable bool // Whether this resource supports importing
167}
168
169// DataSource is a data source that a resource provider implements.
170type DataSource struct {
171 Name string
172}
173
174// ResourceProviderFactory is a function type that creates a new instance
175// of a resource provider.
176type ResourceProviderFactory func() (ResourceProvider, error)
177
178// ResourceProviderFactoryFixed is a helper that creates a
179// ResourceProviderFactory that just returns some fixed provider.
180func ResourceProviderFactoryFixed(p ResourceProvider) ResourceProviderFactory {
181 return func() (ResourceProvider, error) {
182 return p, nil
183 }
184}
185
186func ProviderHasResource(p ResourceProvider, n string) bool {
187 for _, rt := range p.Resources() {
188 if rt.Name == n {
189 return true
190 }
191 }
192
193 return false
194}
195
196func ProviderHasDataSource(p ResourceProvider, n string) bool {
197 for _, rt := range p.DataSources() {
198 if rt.Name == n {
199 return true
200 }
201 }
202
203 return false
204}
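
ResourceProviderFactoryFixed pairs naturally with the MockResourceProvider defined in the next file; a sketch (fmt assumed imported):

mock := &MockResourceProvider{
	ResourcesReturn: []ResourceType{{Name: "aws_instance", Importable: true}},
}

factory := ResourceProviderFactoryFixed(mock)
p, err := factory()
if err != nil {
	panic(err)
}

fmt.Println(ProviderHasResource(p, "aws_instance")) // true
fmt.Println(ProviderHasDataSource(p, "aws_ami"))    // false
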
diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource_provider_mock.go b/vendor/github.com/hashicorp/terraform/terraform/resource_provider_mock.go
new file mode 100644
index 0000000..f531533
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/resource_provider_mock.go
@@ -0,0 +1,297 @@
1package terraform
2
3import "sync"
4
5// MockResourceProvider implements ResourceProvider but mocks out all the
6// calls for testing purposes.
7type MockResourceProvider struct {
8 sync.Mutex
9
10 // Anything you want, in case you need to store extra data with the mock.
11 Meta interface{}
12
13 CloseCalled bool
14 CloseError error
15 InputCalled bool
16 InputInput UIInput
17 InputConfig *ResourceConfig
18 InputReturnConfig *ResourceConfig
19 InputReturnError error
20 InputFn func(UIInput, *ResourceConfig) (*ResourceConfig, error)
21 ApplyCalled bool
22 ApplyInfo *InstanceInfo
23 ApplyState *InstanceState
24 ApplyDiff *InstanceDiff
25 ApplyFn func(*InstanceInfo, *InstanceState, *InstanceDiff) (*InstanceState, error)
26 ApplyReturn *InstanceState
27 ApplyReturnError error
28 ConfigureCalled bool
29 ConfigureConfig *ResourceConfig
30 ConfigureFn func(*ResourceConfig) error
31 ConfigureReturnError error
32 DiffCalled bool
33 DiffInfo *InstanceInfo
34 DiffState *InstanceState
35 DiffDesired *ResourceConfig
36 DiffFn func(*InstanceInfo, *InstanceState, *ResourceConfig) (*InstanceDiff, error)
37 DiffReturn *InstanceDiff
38 DiffReturnError error
39 RefreshCalled bool
40 RefreshInfo *InstanceInfo
41 RefreshState *InstanceState
42 RefreshFn func(*InstanceInfo, *InstanceState) (*InstanceState, error)
43 RefreshReturn *InstanceState
44 RefreshReturnError error
45 ResourcesCalled bool
46 ResourcesReturn []ResourceType
47 ReadDataApplyCalled bool
48 ReadDataApplyInfo *InstanceInfo
49 ReadDataApplyDiff *InstanceDiff
50 ReadDataApplyFn func(*InstanceInfo, *InstanceDiff) (*InstanceState, error)
51 ReadDataApplyReturn *InstanceState
52 ReadDataApplyReturnError error
53 ReadDataDiffCalled bool
54 ReadDataDiffInfo *InstanceInfo
55 ReadDataDiffDesired *ResourceConfig
56 ReadDataDiffFn func(*InstanceInfo, *ResourceConfig) (*InstanceDiff, error)
57 ReadDataDiffReturn *InstanceDiff
58 ReadDataDiffReturnError error
59 StopCalled bool
60 StopFn func() error
61 StopReturnError error
62 DataSourcesCalled bool
63 DataSourcesReturn []DataSource
64 ValidateCalled bool
65 ValidateConfig *ResourceConfig
66 ValidateFn func(*ResourceConfig) ([]string, []error)
67 ValidateReturnWarns []string
68 ValidateReturnErrors []error
69 ValidateResourceFn func(string, *ResourceConfig) ([]string, []error)
70 ValidateResourceCalled bool
71 ValidateResourceType string
72 ValidateResourceConfig *ResourceConfig
73 ValidateResourceReturnWarns []string
74 ValidateResourceReturnErrors []error
75 ValidateDataSourceFn func(string, *ResourceConfig) ([]string, []error)
76 ValidateDataSourceCalled bool
77 ValidateDataSourceType string
78 ValidateDataSourceConfig *ResourceConfig
79 ValidateDataSourceReturnWarns []string
80 ValidateDataSourceReturnErrors []error
81
82 ImportStateCalled bool
83 ImportStateInfo *InstanceInfo
84 ImportStateID string
85 ImportStateReturn []*InstanceState
86 ImportStateReturnError error
87 ImportStateFn func(*InstanceInfo, string) ([]*InstanceState, error)
88}
89
90func (p *MockResourceProvider) Close() error {
91 p.CloseCalled = true
92 return p.CloseError
93}
94
95func (p *MockResourceProvider) Input(
96 input UIInput, c *ResourceConfig) (*ResourceConfig, error) {
97 p.InputCalled = true
98 p.InputInput = input
99 p.InputConfig = c
100 if p.InputFn != nil {
101 return p.InputFn(input, c)
102 }
103 return p.InputReturnConfig, p.InputReturnError
104}
105
106func (p *MockResourceProvider) Validate(c *ResourceConfig) ([]string, []error) {
107 p.Lock()
108 defer p.Unlock()
109
110 p.ValidateCalled = true
111 p.ValidateConfig = c
112 if p.ValidateFn != nil {
113 return p.ValidateFn(c)
114 }
115 return p.ValidateReturnWarns, p.ValidateReturnErrors
116}
117
118func (p *MockResourceProvider) ValidateResource(t string, c *ResourceConfig) ([]string, []error) {
119 p.Lock()
120 defer p.Unlock()
121
122 p.ValidateResourceCalled = true
123 p.ValidateResourceType = t
124 p.ValidateResourceConfig = c
125
126 if p.ValidateResourceFn != nil {
127 return p.ValidateResourceFn(t, c)
128 }
129
130 return p.ValidateResourceReturnWarns, p.ValidateResourceReturnErrors
131}
132
133func (p *MockResourceProvider) Configure(c *ResourceConfig) error {
134 p.Lock()
135 defer p.Unlock()
136
137 p.ConfigureCalled = true
138 p.ConfigureConfig = c
139
140 if p.ConfigureFn != nil {
141 return p.ConfigureFn(c)
142 }
143
144 return p.ConfigureReturnError
145}
146
147func (p *MockResourceProvider) Stop() error {
148 p.Lock()
149 defer p.Unlock()
150
151 p.StopCalled = true
152 if p.StopFn != nil {
153 return p.StopFn()
154 }
155
156 return p.StopReturnError
157}
158
159func (p *MockResourceProvider) Apply(
160 info *InstanceInfo,
161 state *InstanceState,
162 diff *InstanceDiff) (*InstanceState, error) {
163 // We only lock while writing data. Reading is fine
164 p.Lock()
165 p.ApplyCalled = true
166 p.ApplyInfo = info
167 p.ApplyState = state
168 p.ApplyDiff = diff
169 p.Unlock()
170
171 if p.ApplyFn != nil {
172 return p.ApplyFn(info, state, diff)
173 }
174
175 return p.ApplyReturn.DeepCopy(), p.ApplyReturnError
176}
177
178func (p *MockResourceProvider) Diff(
179 info *InstanceInfo,
180 state *InstanceState,
181 desired *ResourceConfig) (*InstanceDiff, error) {
182 p.Lock()
183 defer p.Unlock()
184
185 p.DiffCalled = true
186 p.DiffInfo = info
187 p.DiffState = state
188 p.DiffDesired = desired
189 if p.DiffFn != nil {
190 return p.DiffFn(info, state, desired)
191 }
192
193 return p.DiffReturn.DeepCopy(), p.DiffReturnError
194}
195
196func (p *MockResourceProvider) Refresh(
197 info *InstanceInfo,
198 s *InstanceState) (*InstanceState, error) {
199 p.Lock()
200 defer p.Unlock()
201
202 p.RefreshCalled = true
203 p.RefreshInfo = info
204 p.RefreshState = s
205
206 if p.RefreshFn != nil {
207 return p.RefreshFn(info, s)
208 }
209
210 return p.RefreshReturn.DeepCopy(), p.RefreshReturnError
211}
212
213func (p *MockResourceProvider) Resources() []ResourceType {
214 p.Lock()
215 defer p.Unlock()
216
217 p.ResourcesCalled = true
218 return p.ResourcesReturn
219}
220
221func (p *MockResourceProvider) ImportState(info *InstanceInfo, id string) ([]*InstanceState, error) {
222 p.Lock()
223 defer p.Unlock()
224
225 p.ImportStateCalled = true
226 p.ImportStateInfo = info
227 p.ImportStateID = id
228 if p.ImportStateFn != nil {
229 return p.ImportStateFn(info, id)
230 }
231
232 var result []*InstanceState
233 if p.ImportStateReturn != nil {
234 result = make([]*InstanceState, len(p.ImportStateReturn))
235 for i, v := range p.ImportStateReturn {
236 result[i] = v.DeepCopy()
237 }
238 }
239
240 return result, p.ImportStateReturnError
241}
242
243func (p *MockResourceProvider) ValidateDataSource(t string, c *ResourceConfig) ([]string, []error) {
244 p.Lock()
245 defer p.Unlock()
246
247 p.ValidateDataSourceCalled = true
248 p.ValidateDataSourceType = t
249 p.ValidateDataSourceConfig = c
250
251 if p.ValidateDataSourceFn != nil {
252 return p.ValidateDataSourceFn(t, c)
253 }
254
255 return p.ValidateDataSourceReturnWarns, p.ValidateDataSourceReturnErrors
256}
257
258func (p *MockResourceProvider) ReadDataDiff(
259 info *InstanceInfo,
260 desired *ResourceConfig) (*InstanceDiff, error) {
261 p.Lock()
262 defer p.Unlock()
263
264 p.ReadDataDiffCalled = true
265 p.ReadDataDiffInfo = info
266 p.ReadDataDiffDesired = desired
267 if p.ReadDataDiffFn != nil {
268 return p.ReadDataDiffFn(info, desired)
269 }
270
271 return p.ReadDataDiffReturn.DeepCopy(), p.ReadDataDiffReturnError
272}
273
274func (p *MockResourceProvider) ReadDataApply(
275 info *InstanceInfo,
276 d *InstanceDiff) (*InstanceState, error) {
277 p.Lock()
278 defer p.Unlock()
279
280 p.ReadDataApplyCalled = true
281 p.ReadDataApplyInfo = info
282 p.ReadDataApplyDiff = d
283
284 if p.ReadDataApplyFn != nil {
285 return p.ReadDataApplyFn(info, d)
286 }
287
288 return p.ReadDataApplyReturn.DeepCopy(), p.ReadDataApplyReturnError
289}
290
291func (p *MockResourceProvider) DataSources() []DataSource {
292 p.Lock()
293 defer p.Unlock()
294
295 p.DataSourcesCalled = true
296 return p.DataSourcesReturn
297}
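
Every method of the mock follows the same pattern: record the arguments in the *Called/*Info fields, defer to the optional *Fn override if set, otherwise return the canned *Return values. A sketch of the override pattern in a test (the stub behavior is hypothetical; fmt assumed imported):

p := new(MockResourceProvider)
p.RefreshFn = func(info *InstanceInfo, s *InstanceState) (*InstanceState, error) {
	// Pretend the remote resource still exists, unchanged.
	return s.DeepCopy(), nil
}

state := &InstanceState{ID: "i-12345"}
out, err := p.Refresh(&InstanceInfo{Id: "aws_instance.web", Type: "aws_instance"}, state)
if err != nil {
	panic(err)
}

fmt.Println(p.RefreshCalled, out.ID) // true i-12345
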
diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner.go b/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner.go
new file mode 100644
index 0000000..361ec1e
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner.go
@@ -0,0 +1,54 @@
1package terraform
2
3// ResourceProvisioner is an interface that must be implemented by any
4// resource provisioner: the thing that initializes resources in
5// a Terraform configuration.
6type ResourceProvisioner interface {
7 // Validate is called once at the beginning with the raw
8 // configuration (no interpolation done) and can return a list of warnings
9 // and/or errors.
10 //
11 // This is called once per resource.
12 //
13 // This should not assume any of the values in the resource configuration
14 // are valid since it is possible they have to be interpolated still.
15 // The primary use case of this call is to check that the required keys
16 // are set and that the general structure is correct.
17 Validate(*ResourceConfig) ([]string, []error)
18
19 // Apply runs the provisioner on a specific resource and returns the new
20 // resource state along with an error. Instead of a diff, the ResourceConfig
21 // is provided since provisioners only run after a resource has been
22 // newly created.
23 Apply(UIOutput, *InstanceState, *ResourceConfig) error
24
25 // Stop is called when the provisioner should halt any in-flight actions.
26 //
27 // This can be used to make a nicer Ctrl-C experience for Terraform.
28 // Even if this isn't implemented to do anything (just returns nil),
29 // Terraform will still cleanly stop after the currently executing
30 // graph node is complete. However, this API can be used to make more
31 // efficient halts.
32 //
33 // Stop doesn't have to and shouldn't block waiting for in-flight actions
34	// to complete. It should take any action it wants and return immediately,
35	// acknowledging it has received the stop request. Terraform core will
36	// make no further API calls to the provisioner once the currently
37	// executing graph nodes are complete.
39 //
40 // The error returned, if non-nil, is assumed to mean that signaling the
41 // stop somehow failed and that the user should expect potentially waiting
42 // a longer period of time.
43 Stop() error
44}
45
46// ResourceProvisionerCloser is an interface that provisioners that can close
47// connections that aren't needed anymore must implement.
48type ResourceProvisionerCloser interface {
49 Close() error
50}
51
52// ResourceProvisionerFactory is a function type that creates a new instance
53// of a resource provisioner.
54type ResourceProvisionerFactory func() (ResourceProvisioner, error)
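
For reference, a minimal no-op implementation of the interface might look like the following sketch (the "command" key is illustrative, not required by the interface):

type noopProvisioner struct{}

func (p *noopProvisioner) Validate(c *ResourceConfig) ([]string, []error) {
	// Only structural checks belong here; values may still be uninterpolated.
	return nil, c.CheckSet([]string{"command"})
}

func (p *noopProvisioner) Apply(o UIOutput, s *InstanceState, c *ResourceConfig) error {
	o.Output("noop provisioner: nothing to do")
	return nil
}

func (p *noopProvisioner) Stop() error {
	// Nothing runs in flight here, so acknowledging the stop is enough.
	return nil
}

// Compile-time check that the sketch satisfies the interface.
var _ ResourceProvisioner = (*noopProvisioner)(nil)
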
diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner_mock.go b/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner_mock.go
new file mode 100644
index 0000000..f471a51
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner_mock.go
@@ -0,0 +1,72 @@
1package terraform
2
3import "sync"
4
5// MockResourceProvisioner implements ResourceProvisioner but mocks out all the
6// calls for testing purposes.
7type MockResourceProvisioner struct {
8 sync.Mutex
9 // Anything you want, in case you need to store extra data with the mock.
10 Meta interface{}
11
12 ApplyCalled bool
13 ApplyOutput UIOutput
14 ApplyState *InstanceState
15 ApplyConfig *ResourceConfig
16 ApplyFn func(*InstanceState, *ResourceConfig) error
17 ApplyReturnError error
18
19 ValidateCalled bool
20 ValidateConfig *ResourceConfig
21 ValidateFn func(c *ResourceConfig) ([]string, []error)
22 ValidateReturnWarns []string
23 ValidateReturnErrors []error
24
25 StopCalled bool
26 StopFn func() error
27 StopReturnError error
28}
29
30func (p *MockResourceProvisioner) Validate(c *ResourceConfig) ([]string, []error) {
31 p.Lock()
32 defer p.Unlock()
33
34 p.ValidateCalled = true
35 p.ValidateConfig = c
36 if p.ValidateFn != nil {
37 return p.ValidateFn(c)
38 }
39 return p.ValidateReturnWarns, p.ValidateReturnErrors
40}
41
42func (p *MockResourceProvisioner) Apply(
43 output UIOutput,
44 state *InstanceState,
45 c *ResourceConfig) error {
46 p.Lock()
47
48 p.ApplyCalled = true
49 p.ApplyOutput = output
50 p.ApplyState = state
51 p.ApplyConfig = c
52 if p.ApplyFn != nil {
53 fn := p.ApplyFn
54 p.Unlock()
55 return fn(state, c)
56 }
57
58 defer p.Unlock()
59 return p.ApplyReturnError
60}
61
62func (p *MockResourceProvisioner) Stop() error {
63 p.Lock()
64 defer p.Unlock()
65
66 p.StopCalled = true
67 if p.StopFn != nil {
68 return p.StopFn()
69 }
70
71 return p.StopReturnError
72}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/semantics.go b/vendor/github.com/hashicorp/terraform/terraform/semantics.go
new file mode 100644
index 0000000..20f1d8a
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/semantics.go
@@ -0,0 +1,132 @@
1package terraform
2
3import (
4 "fmt"
5 "strings"
6
7 "github.com/hashicorp/go-multierror"
8 "github.com/hashicorp/terraform/config"
9 "github.com/hashicorp/terraform/dag"
10)
11
12// GraphSemanticChecker is the interface that semantic checks across
13// the entire Terraform graph implement.
14//
15// The graph should NOT be modified by the semantic checker.
16type GraphSemanticChecker interface {
17 Check(*dag.Graph) error
18}
19
20// UnorderedSemanticCheckRunner is an implementation of GraphSemanticChecker
21// that runs a list of SemanticCheckers against the vertices of the graph
22// in no specified order.
23type UnorderedSemanticCheckRunner struct {
24 Checks []SemanticChecker
25}
26
27func (sc *UnorderedSemanticCheckRunner) Check(g *dag.Graph) error {
28 var err error
29 for _, v := range g.Vertices() {
30 for _, check := range sc.Checks {
31 if e := check.Check(g, v); e != nil {
32 err = multierror.Append(err, e)
33 }
34 }
35 }
36
37 return err
38}
39
40// SemanticChecker is the interface that semantic checks across the
41// Terraform graph implement. Errors are accumulated. Even after an error
42// is returned, child vertices in the graph will still be visited.
43//
44// The graph should NOT be modified by the semantic checker.
45//
46// The order in which vertices are visited is left unspecified, so the
47// semantic checks should not rely on that.
48type SemanticChecker interface {
49 Check(*dag.Graph, dag.Vertex) error
50}
51
52// smcUserVariables does all the semantic checks to verify that the
53// variables given satisfy the configuration itself.
54func smcUserVariables(c *config.Config, vs map[string]interface{}) []error {
55 var errs []error
56
57 cvs := make(map[string]*config.Variable)
58 for _, v := range c.Variables {
59 cvs[v.Name] = v
60 }
61
62 // Check that all required variables are present
63 required := make(map[string]struct{})
64 for _, v := range c.Variables {
65 if v.Required() {
66 required[v.Name] = struct{}{}
67 }
68 }
69	for k := range vs {
70 delete(required, k)
71 }
72 if len(required) > 0 {
73	for k := range required {
74 errs = append(errs, fmt.Errorf(
75 "Required variable not set: %s", k))
76 }
77 }
78
79 // Check that types match up
80 for name, proposedValue := range vs {
81 // Check for "map.key" fields. These stopped working with Terraform
82 // 0.7 but we do this to surface a better error message informing
83 // the user what happened.
84 if idx := strings.Index(name, "."); idx > 0 {
85 key := name[:idx]
86 if _, ok := cvs[key]; ok {
87 errs = append(errs, fmt.Errorf(
88 "%s: Overriding map keys with the format `name.key` is no "+
89 "longer allowed. You may still override keys by setting "+
90 "`name = { key = value }`. The maps will be merged. This "+
91 "behavior appeared in 0.7.0.", name))
92 continue
93 }
94 }
95
96 schema, ok := cvs[name]
97 if !ok {
98 continue
99 }
100
101 declaredType := schema.Type()
102
103 switch declaredType {
104 case config.VariableTypeString:
105 switch proposedValue.(type) {
106 case string:
107 continue
108 }
109 case config.VariableTypeMap:
110 switch v := proposedValue.(type) {
111 case map[string]interface{}:
112 continue
113 case []map[string]interface{}:
114 // if we have a list of 1 map, it will get coerced later as needed
115 if len(v) == 1 {
116 continue
117 }
118 }
119 case config.VariableTypeList:
120 switch proposedValue.(type) {
121 case []interface{}:
122 continue
123 }
124 }
125 errs = append(errs, fmt.Errorf("variable %s should be type %s, got %s",
126 name, declaredType.Printable(), hclTypeName(proposedValue)))
127 }
128
129 // TODO(mitchellh): variables that are unknown
130
131 return errs
132}
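
UnorderedSemanticCheckRunner accumulates errors across every vertex rather than stopping at the first failure. A sketch with a trivial, hypothetical checker (fmt assumed imported; this assumes dag.Graph's zero value is usable, as it is elsewhere in this package):

type noNilVertices struct{}

func (noNilVertices) Check(g *dag.Graph, v dag.Vertex) error {
	if v == nil {
		return fmt.Errorf("graph contains a nil vertex")
	}
	return nil
}

var g dag.Graph
g.Add("a")
g.Add("b")

runner := &UnorderedSemanticCheckRunner{
	Checks: []SemanticChecker{noNilVertices{}},
}

// Any non-nil results would be joined with multierror.Append.
err := runner.Check(&g)
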
diff --git a/vendor/github.com/hashicorp/terraform/terraform/shadow.go b/vendor/github.com/hashicorp/terraform/terraform/shadow.go
new file mode 100644
index 0000000..4632559
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/shadow.go
@@ -0,0 +1,28 @@
1package terraform
2
3// Shadow is the interface that any "shadow" structures must implement.
4//
5// A shadow structure is an interface implementation (typically) that
6// shadows a real implementation and verifies that the same behavior occurs
7// on both. The semantics of this behavior are up to the interface itself.
8//
9// A shadow NEVER modifies real values or state. It must always be safe to use.
10//
11// For example, a ResourceProvider shadow ensures that the same operations
12// are done on the same resources with the same configurations.
13//
14// The typical usage of a shadow following this interface is to complete
15// the real operations, then call CloseShadow which tells the shadow that
16// the real side is done. Then, once the shadow is also complete, call
17// ShadowError to find any errors that may have been caught.
18type Shadow interface {
19 // CloseShadow tells the shadow that the REAL implementation is
20 // complete. Therefore, any calls that would block should now return
21 // immediately since no more changes will happen to the real side.
22 CloseShadow() error
23
24 // ShadowError returns the errors that the shadow has found.
25 // This should be called AFTER CloseShadow and AFTER the shadow is
26 // known to be complete (no more calls to it).
27 ShadowError() error
28}
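
The lifecycle described above reduces to a small amount of driver code; a sketch, assuming a concrete Shadow value s and a shadowDone channel (both hypothetical) that signals the shadow side has finished, with the standard library log package imported:

// 1. The real side has completed all of its operations.
if err := s.CloseShadow(); err != nil {
	log.Printf("[ERROR] closing shadow: %s", err)
}

// 2. Wait until the shadow side is also complete...
<-shadowDone

// 3. ...then collect any behavioral differences it recorded.
if err := s.ShadowError(); err != nil {
	log.Printf("[WARN] shadow found differences: %s", err)
}
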
diff --git a/vendor/github.com/hashicorp/terraform/terraform/shadow_components.go b/vendor/github.com/hashicorp/terraform/terraform/shadow_components.go
new file mode 100644
index 0000000..116cf84
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/shadow_components.go
@@ -0,0 +1,273 @@
1package terraform
2
3import (
4 "fmt"
5 "sync"
6
7 "github.com/hashicorp/go-multierror"
8 "github.com/hashicorp/terraform/helper/shadow"
9)
10
11// newShadowComponentFactory creates a shadowed contextComponentFactory
12// so that requests to create new components result in both a real and
13// shadow side.
14func newShadowComponentFactory(
15 f contextComponentFactory) (contextComponentFactory, *shadowComponentFactory) {
16 // Create the shared data
17 shared := &shadowComponentFactoryShared{contextComponentFactory: f}
18
19 // Create the real side
20 real := &shadowComponentFactory{
21 shadowComponentFactoryShared: shared,
22 }
23
24 // Create the shadow
25 shadow := &shadowComponentFactory{
26 shadowComponentFactoryShared: shared,
27 Shadow: true,
28 }
29
30 return real, shadow
31}
32
33// shadowComponentFactory is the shadow side. Any components created
34// with this factory are fake and will not cause real work to happen.
35//
36// Unlike other shadowers, the shadow component factory will allow the
37// shadow to create _any_ component even if it is never requested on the
38// real side. This is because errors will happen later downstream as function
39// calls are made to the shadows that are never matched on the real side.
40type shadowComponentFactory struct {
41 *shadowComponentFactoryShared
42
43 Shadow bool // True if this should return the shadow
44 lock sync.Mutex
45}
46
47func (f *shadowComponentFactory) ResourceProvider(
48 n, uid string) (ResourceProvider, error) {
49 f.lock.Lock()
50 defer f.lock.Unlock()
51
52 real, shadow, err := f.shadowComponentFactoryShared.ResourceProvider(n, uid)
53 var result ResourceProvider = real
54 if f.Shadow {
55 result = shadow
56 }
57
58 return result, err
59}
60
61func (f *shadowComponentFactory) ResourceProvisioner(
62 n, uid string) (ResourceProvisioner, error) {
63 f.lock.Lock()
64 defer f.lock.Unlock()
65
66 real, shadow, err := f.shadowComponentFactoryShared.ResourceProvisioner(n, uid)
67 var result ResourceProvisioner = real
68 if f.Shadow {
69 result = shadow
70 }
71
72 return result, err
73}
74
75// CloseShadow is called when the _real_ side is complete. This will cause
76// all future blocking operations to return immediately on the shadow to
77// ensure the shadow also completes.
78func (f *shadowComponentFactory) CloseShadow() error {
79 // If we aren't the shadow, just return
80 if !f.Shadow {
81 return nil
82 }
83
84 // Lock ourselves so we don't modify state
85 f.lock.Lock()
86 defer f.lock.Unlock()
87
88 // Grab our shared state
89 shared := f.shadowComponentFactoryShared
90
91	// If we're already closed, it's an error
92 if shared.closed {
93 return fmt.Errorf("component factory shadow already closed")
94 }
95
96 // Close all the providers and provisioners and return the error
97 var result error
98 for _, n := range shared.providerKeys {
99 _, shadow, err := shared.ResourceProvider(n, n)
100 if err == nil && shadow != nil {
101 if err := shadow.CloseShadow(); err != nil {
102 result = multierror.Append(result, err)
103 }
104 }
105 }
106
107 for _, n := range shared.provisionerKeys {
108 _, shadow, err := shared.ResourceProvisioner(n, n)
109 if err == nil && shadow != nil {
110 if err := shadow.CloseShadow(); err != nil {
111 result = multierror.Append(result, err)
112 }
113 }
114 }
115
116 // Mark ourselves as closed
117 shared.closed = true
118
119 return result
120}
121
122func (f *shadowComponentFactory) ShadowError() error {
123 // If we aren't the shadow, just return
124 if !f.Shadow {
125 return nil
126 }
127
128 // Lock ourselves so we don't modify state
129 f.lock.Lock()
130 defer f.lock.Unlock()
131
132 // Grab our shared state
133 shared := f.shadowComponentFactoryShared
134
135	// If we're not closed, it's an error
136 if !shared.closed {
137 return fmt.Errorf("component factory must be closed to retrieve errors")
138 }
139
140 // Close all the providers and provisioners and return the error
141 var result error
142 for _, n := range shared.providerKeys {
143 _, shadow, err := shared.ResourceProvider(n, n)
144 if err == nil && shadow != nil {
145 if err := shadow.ShadowError(); err != nil {
146 result = multierror.Append(result, err)
147 }
148 }
149 }
150
151 for _, n := range shared.provisionerKeys {
152 _, shadow, err := shared.ResourceProvisioner(n, n)
153 if err == nil && shadow != nil {
154 if err := shadow.ShadowError(); err != nil {
155 result = multierror.Append(result, err)
156 }
157 }
158 }
159
160 return result
161}
162
163// shadowComponentFactoryShared is shared data between the two factories.
164//
165// It is NOT SAFE to run any function on this struct in parallel. Lock
166// access to this struct.
167type shadowComponentFactoryShared struct {
168 contextComponentFactory
169
170 closed bool
171 providers shadow.KeyedValue
172 providerKeys []string
173 provisioners shadow.KeyedValue
174 provisionerKeys []string
175}
176
177// shadowComponentFactoryProviderEntry is the entry that is stored in
178// the providers key/value for a provider.
179type shadowComponentFactoryProviderEntry struct {
180 Real ResourceProvider
181 Shadow shadowResourceProvider
182 Err error
183}
184
185type shadowComponentFactoryProvisionerEntry struct {
186 Real ResourceProvisioner
187 Shadow shadowResourceProvisioner
188 Err error
189}
190
191func (f *shadowComponentFactoryShared) ResourceProvider(
192 n, uid string) (ResourceProvider, shadowResourceProvider, error) {
193 // Determine if we already have a value
194 raw, ok := f.providers.ValueOk(uid)
195 if !ok {
196 // Build the entry
197 var entry shadowComponentFactoryProviderEntry
198
199 // No value, initialize. Create the original
200 p, err := f.contextComponentFactory.ResourceProvider(n, uid)
201 if err != nil {
202 entry.Err = err
203 p = nil // Just to be sure
204 }
205
206 if p != nil {
207 // Create the shadow
208 real, shadow := newShadowResourceProvider(p)
209 entry.Real = real
210 entry.Shadow = shadow
211
212 if f.closed {
213 shadow.CloseShadow()
214 }
215 }
216
217 // Store the value
218 f.providers.SetValue(uid, &entry)
219 f.providerKeys = append(f.providerKeys, uid)
220 raw = &entry
221 }
222
223 // Read the entry
224 entry, ok := raw.(*shadowComponentFactoryProviderEntry)
225 if !ok {
226 return nil, nil, fmt.Errorf("Unknown value for shadow provider: %#v", raw)
227 }
228
229 // Return
230 return entry.Real, entry.Shadow, entry.Err
231}
232
233func (f *shadowComponentFactoryShared) ResourceProvisioner(
234 n, uid string) (ResourceProvisioner, shadowResourceProvisioner, error) {
235 // Determine if we already have a value
236 raw, ok := f.provisioners.ValueOk(uid)
237 if !ok {
238 // Build the entry
239 var entry shadowComponentFactoryProvisionerEntry
240
241 // No value, initialize. Create the original
242 p, err := f.contextComponentFactory.ResourceProvisioner(n, uid)
243 if err != nil {
244 entry.Err = err
245 p = nil // Just to be sure
246 }
247
248 if p != nil {
249	// Create the shadow. Provisioner shadowing is fairly minimal for now.
250 real, shadow := newShadowResourceProvisioner(p)
251 entry.Real = real
252 entry.Shadow = shadow
253
254 if f.closed {
255 shadow.CloseShadow()
256 }
257 }
258
259 // Store the value
260 f.provisioners.SetValue(uid, &entry)
261 f.provisionerKeys = append(f.provisionerKeys, uid)
262 raw = &entry
263 }
264
265 // Read the entry
266 entry, ok := raw.(*shadowComponentFactoryProvisionerEntry)
267 if !ok {
268 return nil, nil, fmt.Errorf("Unknown value for shadow provisioner: %#v", raw)
269 }
270
271 // Return
272 return entry.Real, entry.Shadow, entry.Err
273}
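
Taken together, the two factories are views over one shared cache, so a component requested through either side is constructed exactly once on the wrapped factory. A sketch of the intended wiring, assuming some concrete contextComponentFactory f; the provider name and uid below are purely illustrative:

func wireComponents(f contextComponentFactory) error {
	realF, shadowF := newShadowComponentFactory(f)

	// The real side hands out the recording half of the provider pair.
	if _, err := realF.ResourceProvider("aws", "aws.0"); err != nil {
		return err
	}

	// The shadow side, asked for the same uid, hands out the replaying
	// half backed by the same shared entry.
	if _, err := shadowF.ResourceProvider("aws", "aws.0"); err != nil {
		return err
	}

	// Once the real walk is done, close the shadow and collect its errors.
	if err := shadowF.CloseShadow(); err != nil {
		return err
	}
	return shadowF.ShadowError()
}
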
diff --git a/vendor/github.com/hashicorp/terraform/terraform/shadow_context.go b/vendor/github.com/hashicorp/terraform/terraform/shadow_context.go
new file mode 100644
index 0000000..5588af2
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/shadow_context.go
@@ -0,0 +1,158 @@
1package terraform
2
3import (
4 "fmt"
5 "strings"
6
7 "github.com/hashicorp/go-multierror"
8 "github.com/mitchellh/copystructure"
9)
10
11// newShadowContext creates a new context that will shadow the given context
12// when walking the graph. The resulting context should be used _only once_
13// for a graph walk.
14//
15// The returned Shadow should be closed after the graph walk with the
16// real context is complete. Errors from the shadow can be retrieved there.
17//
18// Most importantly, any operations done on the shadow context (the returned
19// context) will NEVER affect the real context. All structures are deep
20// copied, no real providers or resources are used, etc.
21func newShadowContext(c *Context) (*Context, *Context, Shadow) {
22 // Copy the targets
23 targetRaw, err := copystructure.Copy(c.targets)
24 if err != nil {
25 panic(err)
26 }
27
28 // Copy the variables
29 varRaw, err := copystructure.Copy(c.variables)
30 if err != nil {
31 panic(err)
32 }
33
34 // Copy the provider inputs
35 providerInputRaw, err := copystructure.Copy(c.providerInputConfig)
36 if err != nil {
37 panic(err)
38 }
39
40 // The factories
41 componentsReal, componentsShadow := newShadowComponentFactory(c.components)
42
43 // Create the shadow
44 shadow := &Context{
45 components: componentsShadow,
46 destroy: c.destroy,
47 diff: c.diff.DeepCopy(),
48 hooks: nil,
49 meta: c.meta,
50 module: c.module,
51 state: c.state.DeepCopy(),
52 targets: targetRaw.([]string),
53 variables: varRaw.(map[string]interface{}),
54
55 // NOTE(mitchellh): This is not going to work for shadows that are
56 // testing that input results in the proper end state. At the time
57	// of writing, input is not used in any state-changing graph
58	// walks anyway, so this checks nothing. We set it to this to avoid
59	// any panics, though even a "nil" value would work here.
60 uiInput: new(MockUIInput),
61
62	// Hardcoded to 4 since parallelism in the shadow doesn't matter
63	// much: we're doing far less work than the real side and our
64	// operations are MUCH faster.
65 parallelSem: NewSemaphore(4),
66 providerInputConfig: providerInputRaw.(map[string]map[string]interface{}),
67 }
68
69 // Create the real context. This is effectively just a copy of
70 // the context given except we need to modify some of the values
71 // to point to the real side of a shadow so the shadow can compare values.
72 real := &Context{
73 // The fields below are changed.
74 components: componentsReal,
75
76 // The fields below are direct copies
77 destroy: c.destroy,
78 diff: c.diff,
79 // diffLock - no copy
80 hooks: c.hooks,
81 meta: c.meta,
82 module: c.module,
83 sh: c.sh,
84 state: c.state,
85 // stateLock - no copy
86 targets: c.targets,
87 uiInput: c.uiInput,
88 variables: c.variables,
89
90 // l - no copy
91 parallelSem: c.parallelSem,
92 providerInputConfig: c.providerInputConfig,
93 runContext: c.runContext,
94 runContextCancel: c.runContextCancel,
95 shadowErr: c.shadowErr,
96 }
97
98 return real, shadow, &shadowContextCloser{
99 Components: componentsShadow,
100 }
101}
102
103// shadowContextVerify takes the real and shadow context and verifies they
104// have equal diffs and states.
105func shadowContextVerify(real, shadow *Context) error {
106 var result error
107
108 // The states compared must be pruned so they're minimal/clean
109 real.state.prune()
110 shadow.state.prune()
111
112 // Compare the states
113 if !real.state.Equal(shadow.state) {
114 result = multierror.Append(result, fmt.Errorf(
115 "Real and shadow states do not match! "+
116 "Real state:\n\n%s\n\n"+
117 "Shadow state:\n\n%s\n\n",
118 real.state, shadow.state))
119 }
120
121 // Compare the diffs
122 if !real.diff.Equal(shadow.diff) {
123 result = multierror.Append(result, fmt.Errorf(
124 "Real and shadow diffs do not match! "+
125 "Real diff:\n\n%s\n\n"+
126 "Shadow diff:\n\n%s\n\n",
127 real.diff, shadow.diff))
128 }
129
130 return result
131}
132
133// shadowContextCloser is the Shadow implementation returned by
134// newShadowContext that closes all the shadows and returns the results.
135type shadowContextCloser struct {
136 Components *shadowComponentFactory
137}
138
139// CloseShadow closes the shadow context.
140func (c *shadowContextCloser) CloseShadow() error {
141 return c.Components.CloseShadow()
142}
143
144func (c *shadowContextCloser) ShadowError() error {
145 err := c.Components.ShadowError()
146 if err == nil {
147 return nil
148 }
149
150 // This is a sad edge case: if the configuration contains uuid() at
151	// any point, we cannot reason about the shadow execution. Tested
152 // with Context2Plan_shadowUuid.
153 if strings.Contains(err.Error(), "uuid()") {
154 err = nil
155 }
156
157 return err
158}
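
Putting the pieces together: build the pair, walk the real context while the shadow walks concurrently, then close and interrogate the shadow before verifying the two sides. A sketch under the assumption of a hypothetical walk function standing in for whichever operation is being shadowed:

func shadowedWalk(c *Context, walk func(*Context) error) error {
	realCtx, shadowCtx, closer := newShadowContext(c)

	// The shadow walk never touches real resources, so it can run
	// concurrently with the real walk.
	shadowDone := make(chan error, 1)
	go func() { shadowDone <- walk(shadowCtx) }()

	if err := walk(realCtx); err != nil {
		return err
	}

	// Unblock the shadow, wait for it, then compare the outcomes.
	if err := closer.CloseShadow(); err != nil {
		return err
	}
	if err := <-shadowDone; err != nil {
		return err
	}
	if err := closer.ShadowError(); err != nil {
		return err
	}
	return shadowContextVerify(realCtx, shadowCtx)
}
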
diff --git a/vendor/github.com/hashicorp/terraform/terraform/shadow_resource_provider.go b/vendor/github.com/hashicorp/terraform/terraform/shadow_resource_provider.go
new file mode 100644
index 0000000..9741d7e
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/shadow_resource_provider.go
@@ -0,0 +1,815 @@
1package terraform
2
3import (
4 "fmt"
5 "log"
6 "sync"
7
8 "github.com/hashicorp/go-multierror"
9 "github.com/hashicorp/terraform/helper/shadow"
10)
11
12// shadowResourceProvider implements ResourceProvider for the shadow
13// eval context defined in eval_context_shadow.go.
14//
15// This is used to verify behavior with a real provider. This shouldn't
16// be used directly.
17type shadowResourceProvider interface {
18 ResourceProvider
19 Shadow
20}
21
22// newShadowResourceProvider creates a new shadowed ResourceProvider.
23//
24 // This assumes a well-behaved real ResourceProvider. For example,
25 // it assumes that the `Resources` call underneath doesn't change values:
26 // once it is called on the real provider, the result is cached and
27 // returned in the shadow, since the number of calls shouldn't affect
28 // actual behavior.
29//
30// However, with calls like Apply, call order is taken into account,
31// parameters are checked for equality, etc.
32func newShadowResourceProvider(p ResourceProvider) (ResourceProvider, shadowResourceProvider) {
33 // Create the shared data
34 shared := shadowResourceProviderShared{}
35
36 // Create the real provider that does actual work
37 real := &shadowResourceProviderReal{
38 ResourceProvider: p,
39 Shared: &shared,
40 }
41
42 // Create the shadow that watches the real value
43 shadow := &shadowResourceProviderShadow{
44 Shared: &shared,
45
46 resources: p.Resources(),
47 dataSources: p.DataSources(),
48 }
49
50 return real, shadow
51}
52
53// shadowResourceProviderReal is the real resource provider. Function calls
54// to this will perform real work. This records the parameters and return
55// values and call order for the shadow to reproduce.
56type shadowResourceProviderReal struct {
57 ResourceProvider
58
59 Shared *shadowResourceProviderShared
60}
61
62func (p *shadowResourceProviderReal) Close() error {
63 var result error
64 if c, ok := p.ResourceProvider.(ResourceProviderCloser); ok {
65 result = c.Close()
66 }
67
68 p.Shared.CloseErr.SetValue(result)
69 return result
70}
71
72func (p *shadowResourceProviderReal) Input(
73 input UIInput, c *ResourceConfig) (*ResourceConfig, error) {
74 cCopy := c.DeepCopy()
75
76 result, err := p.ResourceProvider.Input(input, c)
77 p.Shared.Input.SetValue(&shadowResourceProviderInput{
78 Config: cCopy,
79 Result: result.DeepCopy(),
80 ResultErr: err,
81 })
82
83 return result, err
84}
85
86func (p *shadowResourceProviderReal) Validate(c *ResourceConfig) ([]string, []error) {
87 warns, errs := p.ResourceProvider.Validate(c)
88 p.Shared.Validate.SetValue(&shadowResourceProviderValidate{
89 Config: c.DeepCopy(),
90 ResultWarn: warns,
91 ResultErr: errs,
92 })
93
94 return warns, errs
95}
96
97func (p *shadowResourceProviderReal) Configure(c *ResourceConfig) error {
98 cCopy := c.DeepCopy()
99
100 err := p.ResourceProvider.Configure(c)
101 p.Shared.Configure.SetValue(&shadowResourceProviderConfigure{
102 Config: cCopy,
103 Result: err,
104 })
105
106 return err
107}
108
109func (p *shadowResourceProviderReal) Stop() error {
110 return p.ResourceProvider.Stop()
111}
112
113func (p *shadowResourceProviderReal) ValidateResource(
114 t string, c *ResourceConfig) ([]string, []error) {
115 key := t
116 configCopy := c.DeepCopy()
117
118 // Real operation
119 warns, errs := p.ResourceProvider.ValidateResource(t, c)
120
121 // Initialize to ensure we always have a wrapper with a lock
122 p.Shared.ValidateResource.Init(
123 key, &shadowResourceProviderValidateResourceWrapper{})
124
125 // Get the result
126 raw := p.Shared.ValidateResource.Value(key)
127 wrapper, ok := raw.(*shadowResourceProviderValidateResourceWrapper)
128 if !ok {
129 // If this fails then we just continue with our day... the shadow
130		// will fail too, but there isn't much we can do.
131 log.Printf(
132 "[ERROR] unknown value in ValidateResource shadow value: %#v", raw)
133 return warns, errs
134 }
135
136 // Lock the wrapper for writing and record our call
137 wrapper.Lock()
138 defer wrapper.Unlock()
139
140 wrapper.Calls = append(wrapper.Calls, &shadowResourceProviderValidateResource{
141 Config: configCopy,
142 Warns: warns,
143 Errors: errs,
144 })
145
146 // With it locked, call SetValue again so that it triggers WaitForChange
147 p.Shared.ValidateResource.SetValue(key, wrapper)
148
149 // Return the result
150 return warns, errs
151}
152
153func (p *shadowResourceProviderReal) Apply(
154 info *InstanceInfo,
155 state *InstanceState,
156 diff *InstanceDiff) (*InstanceState, error) {
157	// These have to be copied before the call since the call can modify them
158 stateCopy := state.DeepCopy()
159 diffCopy := diff.DeepCopy()
160
161 result, err := p.ResourceProvider.Apply(info, state, diff)
162 p.Shared.Apply.SetValue(info.uniqueId(), &shadowResourceProviderApply{
163 State: stateCopy,
164 Diff: diffCopy,
165 Result: result.DeepCopy(),
166 ResultErr: err,
167 })
168
169 return result, err
170}
171
172func (p *shadowResourceProviderReal) Diff(
173 info *InstanceInfo,
174 state *InstanceState,
175 desired *ResourceConfig) (*InstanceDiff, error) {
176	// These have to be copied before the call since the call can modify them
177 stateCopy := state.DeepCopy()
178 desiredCopy := desired.DeepCopy()
179
180 result, err := p.ResourceProvider.Diff(info, state, desired)
181 p.Shared.Diff.SetValue(info.uniqueId(), &shadowResourceProviderDiff{
182 State: stateCopy,
183 Desired: desiredCopy,
184 Result: result.DeepCopy(),
185 ResultErr: err,
186 })
187
188 return result, err
189}
190
191func (p *shadowResourceProviderReal) Refresh(
192 info *InstanceInfo,
193 state *InstanceState) (*InstanceState, error) {
194	// These have to be copied before the call since the call can modify them
195 stateCopy := state.DeepCopy()
196
197 result, err := p.ResourceProvider.Refresh(info, state)
198 p.Shared.Refresh.SetValue(info.uniqueId(), &shadowResourceProviderRefresh{
199 State: stateCopy,
200 Result: result.DeepCopy(),
201 ResultErr: err,
202 })
203
204 return result, err
205}
206
207func (p *shadowResourceProviderReal) ValidateDataSource(
208 t string, c *ResourceConfig) ([]string, []error) {
209 key := t
210 configCopy := c.DeepCopy()
211
212 // Real operation
213 warns, errs := p.ResourceProvider.ValidateDataSource(t, c)
214
215 // Initialize
216 p.Shared.ValidateDataSource.Init(
217 key, &shadowResourceProviderValidateDataSourceWrapper{})
218
219 // Get the result
220 raw := p.Shared.ValidateDataSource.Value(key)
221 wrapper, ok := raw.(*shadowResourceProviderValidateDataSourceWrapper)
222 if !ok {
223 // If this fails then we just continue with our day... the shadow
224		// will fail too, but there isn't much we can do.
225 log.Printf(
226 "[ERROR] unknown value in ValidateDataSource shadow value: %#v", raw)
227 return warns, errs
228 }
229
230 // Lock the wrapper for writing and record our call
231 wrapper.Lock()
232 defer wrapper.Unlock()
233
234 wrapper.Calls = append(wrapper.Calls, &shadowResourceProviderValidateDataSource{
235 Config: configCopy,
236 Warns: warns,
237 Errors: errs,
238 })
239
240 // Set it
241 p.Shared.ValidateDataSource.SetValue(key, wrapper)
242
243 // Return the result
244 return warns, errs
245}
246
247func (p *shadowResourceProviderReal) ReadDataDiff(
248 info *InstanceInfo,
249 desired *ResourceConfig) (*InstanceDiff, error) {
250	// These have to be copied before the call since the call can modify them
251 desiredCopy := desired.DeepCopy()
252
253 result, err := p.ResourceProvider.ReadDataDiff(info, desired)
254 p.Shared.ReadDataDiff.SetValue(info.uniqueId(), &shadowResourceProviderReadDataDiff{
255 Desired: desiredCopy,
256 Result: result.DeepCopy(),
257 ResultErr: err,
258 })
259
260 return result, err
261}
262
263func (p *shadowResourceProviderReal) ReadDataApply(
264 info *InstanceInfo,
265 diff *InstanceDiff) (*InstanceState, error) {
266	// These have to be copied before the call since the call can modify them
267 diffCopy := diff.DeepCopy()
268
269 result, err := p.ResourceProvider.ReadDataApply(info, diff)
270 p.Shared.ReadDataApply.SetValue(info.uniqueId(), &shadowResourceProviderReadDataApply{
271 Diff: diffCopy,
272 Result: result.DeepCopy(),
273 ResultErr: err,
274 })
275
276 return result, err
277}
278
279// shadowResourceProviderShadow is the shadow resource provider. Function
280// calls never affect real resources. This is paired with the "real" side
281// which must be called properly to enable recording.
282type shadowResourceProviderShadow struct {
283 Shared *shadowResourceProviderShared
284
285 // Cached values that are expected to not change
286 resources []ResourceType
287 dataSources []DataSource
288
289 Error error // Error is the list of errors from the shadow
290 ErrorLock sync.Mutex
291}
292
293type shadowResourceProviderShared struct {
294 // NOTE: Anytime a value is added here, be sure to add it to
295 // the Close() method so that it is closed.
296
297 CloseErr shadow.Value
298 Input shadow.Value
299 Validate shadow.Value
300 Configure shadow.Value
301 ValidateResource shadow.KeyedValue
302 Apply shadow.KeyedValue
303 Diff shadow.KeyedValue
304 Refresh shadow.KeyedValue
305 ValidateDataSource shadow.KeyedValue
306 ReadDataDiff shadow.KeyedValue
307 ReadDataApply shadow.KeyedValue
308}
309
310func (p *shadowResourceProviderShared) Close() error {
311 return shadow.Close(p)
312}
313
314func (p *shadowResourceProviderShadow) CloseShadow() error {
315 err := p.Shared.Close()
316 if err != nil {
317 err = fmt.Errorf("close error: %s", err)
318 }
319
320 return err
321}
322
323func (p *shadowResourceProviderShadow) ShadowError() error {
324 return p.Error
325}
326
327func (p *shadowResourceProviderShadow) Resources() []ResourceType {
328 return p.resources
329}
330
331func (p *shadowResourceProviderShadow) DataSources() []DataSource {
332 return p.dataSources
333}
334
335func (p *shadowResourceProviderShadow) Close() error {
336 v := p.Shared.CloseErr.Value()
337 if v == nil {
338 return nil
339 }
340
341 return v.(error)
342}
343
344func (p *shadowResourceProviderShadow) Input(
345 input UIInput, c *ResourceConfig) (*ResourceConfig, error) {
346 // Get the result of the input call
347 raw := p.Shared.Input.Value()
348 if raw == nil {
349 return nil, nil
350 }
351
352 result, ok := raw.(*shadowResourceProviderInput)
353 if !ok {
354 p.ErrorLock.Lock()
355 defer p.ErrorLock.Unlock()
356 p.Error = multierror.Append(p.Error, fmt.Errorf(
357 "Unknown 'input' shadow value: %#v", raw))
358 return nil, nil
359 }
360
361 // Compare the parameters, which should be identical
362 if !c.Equal(result.Config) {
363 p.ErrorLock.Lock()
364 p.Error = multierror.Append(p.Error, fmt.Errorf(
365 "Input had unequal configurations (real, then shadow):\n\n%#v\n\n%#v",
366 result.Config, c))
367 p.ErrorLock.Unlock()
368 }
369
370 // Return the results
371 return result.Result, result.ResultErr
372}
373
374func (p *shadowResourceProviderShadow) Validate(c *ResourceConfig) ([]string, []error) {
375 // Get the result of the validate call
376 raw := p.Shared.Validate.Value()
377 if raw == nil {
378 return nil, nil
379 }
380
381 result, ok := raw.(*shadowResourceProviderValidate)
382 if !ok {
383 p.ErrorLock.Lock()
384 defer p.ErrorLock.Unlock()
385 p.Error = multierror.Append(p.Error, fmt.Errorf(
386 "Unknown 'validate' shadow value: %#v", raw))
387 return nil, nil
388 }
389
390 // Compare the parameters, which should be identical
391 if !c.Equal(result.Config) {
392 p.ErrorLock.Lock()
393 p.Error = multierror.Append(p.Error, fmt.Errorf(
394 "Validate had unequal configurations (real, then shadow):\n\n%#v\n\n%#v",
395 result.Config, c))
396 p.ErrorLock.Unlock()
397 }
398
399 // Return the results
400 return result.ResultWarn, result.ResultErr
401}
402
403func (p *shadowResourceProviderShadow) Configure(c *ResourceConfig) error {
404 // Get the result of the call
405 raw := p.Shared.Configure.Value()
406 if raw == nil {
407 return nil
408 }
409
410 result, ok := raw.(*shadowResourceProviderConfigure)
411 if !ok {
412 p.ErrorLock.Lock()
413 defer p.ErrorLock.Unlock()
414 p.Error = multierror.Append(p.Error, fmt.Errorf(
415 "Unknown 'configure' shadow value: %#v", raw))
416 return nil
417 }
418
419 // Compare the parameters, which should be identical
420 if !c.Equal(result.Config) {
421 p.ErrorLock.Lock()
422 p.Error = multierror.Append(p.Error, fmt.Errorf(
423 "Configure had unequal configurations (real, then shadow):\n\n%#v\n\n%#v",
424 result.Config, c))
425 p.ErrorLock.Unlock()
426 }
427
428 // Return the results
429 return result.Result
430}
431
432// Stop returns immediately.
433func (p *shadowResourceProviderShadow) Stop() error {
434 return nil
435}
436
437func (p *shadowResourceProviderShadow) ValidateResource(t string, c *ResourceConfig) ([]string, []error) {
438 // Unique key
439 key := t
440
441 // Get the initial value
442 raw := p.Shared.ValidateResource.Value(key)
443
444 // Find a validation with our configuration
445 var result *shadowResourceProviderValidateResource
446 for {
447 // Get the value
448 if raw == nil {
449 p.ErrorLock.Lock()
450 defer p.ErrorLock.Unlock()
451 p.Error = multierror.Append(p.Error, fmt.Errorf(
452 "Unknown 'ValidateResource' call for %q:\n\n%#v",
453 key, c))
454 return nil, nil
455 }
456
457 wrapper, ok := raw.(*shadowResourceProviderValidateResourceWrapper)
458 if !ok {
459 p.ErrorLock.Lock()
460 defer p.ErrorLock.Unlock()
461 p.Error = multierror.Append(p.Error, fmt.Errorf(
462 "Unknown 'ValidateResource' shadow value for %q: %#v", key, raw))
463 return nil, nil
464 }
465
466 // Look for the matching call with our configuration
467 wrapper.RLock()
468 for _, call := range wrapper.Calls {
469 if call.Config.Equal(c) {
470 result = call
471 break
472 }
473 }
474 wrapper.RUnlock()
475
476 // If we found a result, exit
477 if result != nil {
478 break
479 }
480
481 // Wait for a change so we can get the wrapper again
482 raw = p.Shared.ValidateResource.WaitForChange(key)
483 }
484
485 return result.Warns, result.Errors
486}
487
488func (p *shadowResourceProviderShadow) Apply(
489 info *InstanceInfo,
490 state *InstanceState,
491 diff *InstanceDiff) (*InstanceState, error) {
492 // Unique key
493 key := info.uniqueId()
494 raw := p.Shared.Apply.Value(key)
495 if raw == nil {
496 p.ErrorLock.Lock()
497 defer p.ErrorLock.Unlock()
498 p.Error = multierror.Append(p.Error, fmt.Errorf(
499 "Unknown 'apply' call for %q:\n\n%#v\n\n%#v",
500 key, state, diff))
501 return nil, nil
502 }
503
504 result, ok := raw.(*shadowResourceProviderApply)
505 if !ok {
506 p.ErrorLock.Lock()
507 defer p.ErrorLock.Unlock()
508 p.Error = multierror.Append(p.Error, fmt.Errorf(
509 "Unknown 'apply' shadow value for %q: %#v", key, raw))
510 return nil, nil
511 }
512
513 // Compare the parameters, which should be identical
514 if !state.Equal(result.State) {
515 p.ErrorLock.Lock()
516 p.Error = multierror.Append(p.Error, fmt.Errorf(
517 "Apply %q: state had unequal states (real, then shadow):\n\n%#v\n\n%#v",
518 key, result.State, state))
519 p.ErrorLock.Unlock()
520 }
521
522 if !diff.Equal(result.Diff) {
523 p.ErrorLock.Lock()
524 p.Error = multierror.Append(p.Error, fmt.Errorf(
525 "Apply %q: unequal diffs (real, then shadow):\n\n%#v\n\n%#v",
526 key, result.Diff, diff))
527 p.ErrorLock.Unlock()
528 }
529
530 return result.Result, result.ResultErr
531}
532
533func (p *shadowResourceProviderShadow) Diff(
534 info *InstanceInfo,
535 state *InstanceState,
536 desired *ResourceConfig) (*InstanceDiff, error) {
537 // Unique key
538 key := info.uniqueId()
539 raw := p.Shared.Diff.Value(key)
540 if raw == nil {
541 p.ErrorLock.Lock()
542 defer p.ErrorLock.Unlock()
543 p.Error = multierror.Append(p.Error, fmt.Errorf(
544 "Unknown 'diff' call for %q:\n\n%#v\n\n%#v",
545 key, state, desired))
546 return nil, nil
547 }
548
549 result, ok := raw.(*shadowResourceProviderDiff)
550 if !ok {
551 p.ErrorLock.Lock()
552 defer p.ErrorLock.Unlock()
553 p.Error = multierror.Append(p.Error, fmt.Errorf(
554 "Unknown 'diff' shadow value for %q: %#v", key, raw))
555 return nil, nil
556 }
557
558 // Compare the parameters, which should be identical
559 if !state.Equal(result.State) {
560 p.ErrorLock.Lock()
561 p.Error = multierror.Append(p.Error, fmt.Errorf(
562 "Diff %q had unequal states (real, then shadow):\n\n%#v\n\n%#v",
563 key, result.State, state))
564 p.ErrorLock.Unlock()
565 }
566 if !desired.Equal(result.Desired) {
567 p.ErrorLock.Lock()
568 p.Error = multierror.Append(p.Error, fmt.Errorf(
569 "Diff %q had unequal states (real, then shadow):\n\n%#v\n\n%#v",
570 key, result.Desired, desired))
571 p.ErrorLock.Unlock()
572 }
573
574 return result.Result, result.ResultErr
575}
576
577func (p *shadowResourceProviderShadow) Refresh(
578 info *InstanceInfo,
579 state *InstanceState) (*InstanceState, error) {
580 // Unique key
581 key := info.uniqueId()
582 raw := p.Shared.Refresh.Value(key)
583 if raw == nil {
584 p.ErrorLock.Lock()
585 defer p.ErrorLock.Unlock()
586 p.Error = multierror.Append(p.Error, fmt.Errorf(
587 "Unknown 'refresh' call for %q:\n\n%#v",
588 key, state))
589 return nil, nil
590 }
591
592 result, ok := raw.(*shadowResourceProviderRefresh)
593 if !ok {
594 p.ErrorLock.Lock()
595 defer p.ErrorLock.Unlock()
596 p.Error = multierror.Append(p.Error, fmt.Errorf(
597 "Unknown 'refresh' shadow value: %#v", raw))
598 return nil, nil
599 }
600
601 // Compare the parameters, which should be identical
602 if !state.Equal(result.State) {
603 p.ErrorLock.Lock()
604 p.Error = multierror.Append(p.Error, fmt.Errorf(
605 "Refresh %q had unequal states (real, then shadow):\n\n%#v\n\n%#v",
606 key, result.State, state))
607 p.ErrorLock.Unlock()
608 }
609
610 return result.Result, result.ResultErr
611}
612
613func (p *shadowResourceProviderShadow) ValidateDataSource(
614 t string, c *ResourceConfig) ([]string, []error) {
615 // Unique key
616 key := t
617
618 // Get the initial value
619 raw := p.Shared.ValidateDataSource.Value(key)
620
621 // Find a validation with our configuration
622 var result *shadowResourceProviderValidateDataSource
623 for {
624 // Get the value
625 if raw == nil {
626 p.ErrorLock.Lock()
627 defer p.ErrorLock.Unlock()
628 p.Error = multierror.Append(p.Error, fmt.Errorf(
629 "Unknown 'ValidateDataSource' call for %q:\n\n%#v",
630 key, c))
631 return nil, nil
632 }
633
634 wrapper, ok := raw.(*shadowResourceProviderValidateDataSourceWrapper)
635 if !ok {
636 p.ErrorLock.Lock()
637 defer p.ErrorLock.Unlock()
638 p.Error = multierror.Append(p.Error, fmt.Errorf(
639 "Unknown 'ValidateDataSource' shadow value: %#v", raw))
640 return nil, nil
641 }
642
643 // Look for the matching call with our configuration
644 wrapper.RLock()
645 for _, call := range wrapper.Calls {
646 if call.Config.Equal(c) {
647 result = call
648 break
649 }
650 }
651 wrapper.RUnlock()
652
653 // If we found a result, exit
654 if result != nil {
655 break
656 }
657
658 // Wait for a change so we can get the wrapper again
659 raw = p.Shared.ValidateDataSource.WaitForChange(key)
660 }
661
662 return result.Warns, result.Errors
663}
664
665func (p *shadowResourceProviderShadow) ReadDataDiff(
666 info *InstanceInfo,
667 desired *ResourceConfig) (*InstanceDiff, error) {
668 // Unique key
669 key := info.uniqueId()
670 raw := p.Shared.ReadDataDiff.Value(key)
671 if raw == nil {
672 p.ErrorLock.Lock()
673 defer p.ErrorLock.Unlock()
674 p.Error = multierror.Append(p.Error, fmt.Errorf(
675 "Unknown 'ReadDataDiff' call for %q:\n\n%#v",
676 key, desired))
677 return nil, nil
678 }
679
680 result, ok := raw.(*shadowResourceProviderReadDataDiff)
681 if !ok {
682 p.ErrorLock.Lock()
683 defer p.ErrorLock.Unlock()
684 p.Error = multierror.Append(p.Error, fmt.Errorf(
685 "Unknown 'ReadDataDiff' shadow value for %q: %#v", key, raw))
686 return nil, nil
687 }
688
689 // Compare the parameters, which should be identical
690 if !desired.Equal(result.Desired) {
691 p.ErrorLock.Lock()
692 p.Error = multierror.Append(p.Error, fmt.Errorf(
693 "ReadDataDiff %q had unequal configs (real, then shadow):\n\n%#v\n\n%#v",
694 key, result.Desired, desired))
695 p.ErrorLock.Unlock()
696 }
697
698 return result.Result, result.ResultErr
699}
700
701func (p *shadowResourceProviderShadow) ReadDataApply(
702 info *InstanceInfo,
703 d *InstanceDiff) (*InstanceState, error) {
704 // Unique key
705 key := info.uniqueId()
706 raw := p.Shared.ReadDataApply.Value(key)
707 if raw == nil {
708 p.ErrorLock.Lock()
709 defer p.ErrorLock.Unlock()
710 p.Error = multierror.Append(p.Error, fmt.Errorf(
711 "Unknown 'ReadDataApply' call for %q:\n\n%#v",
712 key, d))
713 return nil, nil
714 }
715
716 result, ok := raw.(*shadowResourceProviderReadDataApply)
717 if !ok {
718 p.ErrorLock.Lock()
719 defer p.ErrorLock.Unlock()
720 p.Error = multierror.Append(p.Error, fmt.Errorf(
721 "Unknown 'ReadDataApply' shadow value for %q: %#v", key, raw))
722 return nil, nil
723 }
724
725 // Compare the parameters, which should be identical
726 if !d.Equal(result.Diff) {
727 p.ErrorLock.Lock()
728 p.Error = multierror.Append(p.Error, fmt.Errorf(
729 "ReadDataApply: unequal diffs (real, then shadow):\n\n%#v\n\n%#v",
730 result.Diff, d))
731 p.ErrorLock.Unlock()
732 }
733
734 return result.Result, result.ResultErr
735}
736
737func (p *shadowResourceProviderShadow) ImportState(info *InstanceInfo, id string) ([]*InstanceState, error) {
738 panic("import not supported by shadow graph")
739}
740
741// The structs for the various function calls are put below. These structs
742// are used to carry call information across the real/shadow boundaries.
743
744type shadowResourceProviderInput struct {
745 Config *ResourceConfig
746 Result *ResourceConfig
747 ResultErr error
748}
749
750type shadowResourceProviderValidate struct {
751 Config *ResourceConfig
752 ResultWarn []string
753 ResultErr []error
754}
755
756type shadowResourceProviderConfigure struct {
757 Config *ResourceConfig
758 Result error
759}
760
761type shadowResourceProviderValidateResourceWrapper struct {
762 sync.RWMutex
763
764 Calls []*shadowResourceProviderValidateResource
765}
766
767type shadowResourceProviderValidateResource struct {
768 Config *ResourceConfig
769 Warns []string
770 Errors []error
771}
772
773type shadowResourceProviderApply struct {
774 State *InstanceState
775 Diff *InstanceDiff
776 Result *InstanceState
777 ResultErr error
778}
779
780type shadowResourceProviderDiff struct {
781 State *InstanceState
782 Desired *ResourceConfig
783 Result *InstanceDiff
784 ResultErr error
785}
786
787type shadowResourceProviderRefresh struct {
788 State *InstanceState
789 Result *InstanceState
790 ResultErr error
791}
792
793type shadowResourceProviderValidateDataSourceWrapper struct {
794 sync.RWMutex
795
796 Calls []*shadowResourceProviderValidateDataSource
797}
798
799type shadowResourceProviderValidateDataSource struct {
800 Config *ResourceConfig
801 Warns []string
802 Errors []error
803}
804
805type shadowResourceProviderReadDataDiff struct {
806 Desired *ResourceConfig
807 Result *InstanceDiff
808 ResultErr error
809}
810
811type shadowResourceProviderReadDataApply struct {
812 Diff *InstanceDiff
813 Result *InstanceState
814 ResultErr error
815}
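
The record/replay protocol can be seen end to end on a single method: the real half records a deep copy of the arguments plus the results into the shared values, and the shadow half replays the results while checking that it was invoked with equal arguments. A minimal sketch using Configure, assuming an existing provider p and config cfg:

func shadowConfigure(p ResourceProvider, cfg *ResourceConfig) error {
	realP, shadowP := newShadowResourceProvider(p)

	// Real side: performs the actual Configure and records a copy of cfg
	// along with the result into the shared shadow.Value.
	if err := realP.Configure(cfg); err != nil {
		return err
	}

	// Shadow side: reads the recorded call, compares its own cfg against
	// the recorded copy, and returns the recorded result without doing
	// any real work.
	if err := shadowP.Configure(cfg); err != nil {
		return err
	}

	// Finish the shadow and surface any mismatches it accumulated.
	if err := shadowP.CloseShadow(); err != nil {
		return err
	}
	return shadowP.ShadowError()
}
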
diff --git a/vendor/github.com/hashicorp/terraform/terraform/shadow_resource_provisioner.go b/vendor/github.com/hashicorp/terraform/terraform/shadow_resource_provisioner.go
new file mode 100644
index 0000000..60a4908
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/shadow_resource_provisioner.go
@@ -0,0 +1,282 @@
1package terraform
2
3import (
4 "fmt"
5 "io"
6 "log"
7 "sync"
8
9 "github.com/hashicorp/go-multierror"
10 "github.com/hashicorp/terraform/helper/shadow"
11)
12
13// shadowResourceProvisioner implements ResourceProvisioner for the shadow
14// eval context defined in eval_context_shadow.go.
15//
16// This is used to verify behavior with a real provisioner. This shouldn't
17// be used directly.
18type shadowResourceProvisioner interface {
19 ResourceProvisioner
20 Shadow
21}
22
23// newShadowResourceProvisioner creates a new shadowed ResourceProvisioner.
24func newShadowResourceProvisioner(
25 p ResourceProvisioner) (ResourceProvisioner, shadowResourceProvisioner) {
26 // Create the shared data
27 shared := shadowResourceProvisionerShared{
28 Validate: shadow.ComparedValue{
29 Func: shadowResourceProvisionerValidateCompare,
30 },
31 }
32
33 // Create the real provisioner that does actual work
34 real := &shadowResourceProvisionerReal{
35 ResourceProvisioner: p,
36 Shared: &shared,
37 }
38
39 // Create the shadow that watches the real value
40 shadow := &shadowResourceProvisionerShadow{
41 Shared: &shared,
42 }
43
44 return real, shadow
45}
46
47// shadowResourceProvisionerReal is the real resource provisioner. Function calls
48// to this will perform real work. This records the parameters and return
49// values and call order for the shadow to reproduce.
50type shadowResourceProvisionerReal struct {
51 ResourceProvisioner
52
53 Shared *shadowResourceProvisionerShared
54}
55
56func (p *shadowResourceProvisionerReal) Close() error {
57 var result error
58 if c, ok := p.ResourceProvisioner.(ResourceProvisionerCloser); ok {
59 result = c.Close()
60 }
61
62 p.Shared.CloseErr.SetValue(result)
63 return result
64}
65
66func (p *shadowResourceProvisionerReal) Validate(c *ResourceConfig) ([]string, []error) {
67 warns, errs := p.ResourceProvisioner.Validate(c)
68 p.Shared.Validate.SetValue(&shadowResourceProvisionerValidate{
69 Config: c,
70 ResultWarn: warns,
71 ResultErr: errs,
72 })
73
74 return warns, errs
75}
76
77func (p *shadowResourceProvisionerReal) Apply(
78 output UIOutput, s *InstanceState, c *ResourceConfig) error {
79 err := p.ResourceProvisioner.Apply(output, s, c)
80
81	// Write the result, grab a lock for writing. This should never
82 // block long since the operations below don't block.
83 p.Shared.ApplyLock.Lock()
84 defer p.Shared.ApplyLock.Unlock()
85
86 key := s.ID
87 raw, ok := p.Shared.Apply.ValueOk(key)
88 if !ok {
89 // Setup a new value
90 raw = &shadow.ComparedValue{
91 Func: shadowResourceProvisionerApplyCompare,
92 }
93
94 // Set it
95 p.Shared.Apply.SetValue(key, raw)
96 }
97
98 compareVal, ok := raw.(*shadow.ComparedValue)
99 if !ok {
100 // Just log and return so that we don't cause the real side
101 // any side effects.
102 log.Printf("[ERROR] unknown value in 'apply': %#v", raw)
103 return err
104 }
105
106 // Write the resulting value
107 compareVal.SetValue(&shadowResourceProvisionerApply{
108 Config: c,
109 ResultErr: err,
110 })
111
112 return err
113}
114
115func (p *shadowResourceProvisionerReal) Stop() error {
116 return p.ResourceProvisioner.Stop()
117}
118
119// shadowResourceProvisionerShadow is the shadow resource provisioner. Function
120// calls never affect real resources. This is paired with the "real" side
121// which must be called properly to enable recording.
122type shadowResourceProvisionerShadow struct {
123 Shared *shadowResourceProvisionerShared
124
125 Error error // Error is the list of errors from the shadow
126 ErrorLock sync.Mutex
127}
128
129type shadowResourceProvisionerShared struct {
130 // NOTE: Anytime a value is added here, be sure to add it to
131 // the Close() method so that it is closed.
132
133 CloseErr shadow.Value
134 Validate shadow.ComparedValue
135 Apply shadow.KeyedValue
136 ApplyLock sync.Mutex // For writing only
137}
138
139func (p *shadowResourceProvisionerShared) Close() error {
140 closers := []io.Closer{
141 &p.CloseErr,
142 }
143
144 for _, c := range closers {
145 // This should never happen, but we don't panic because a panic
146 // could affect the real behavior of Terraform and a shadow should
147 // never be able to do that.
148 if err := c.Close(); err != nil {
149 return err
150 }
151 }
152
153 return nil
154}
155
156func (p *shadowResourceProvisionerShadow) CloseShadow() error {
157 err := p.Shared.Close()
158 if err != nil {
159 err = fmt.Errorf("close error: %s", err)
160 }
161
162 return err
163}
164
165func (p *shadowResourceProvisionerShadow) ShadowError() error {
166 return p.Error
167}
168
169func (p *shadowResourceProvisionerShadow) Close() error {
170 v := p.Shared.CloseErr.Value()
171 if v == nil {
172 return nil
173 }
174
175 return v.(error)
176}
177
178func (p *shadowResourceProvisionerShadow) Validate(c *ResourceConfig) ([]string, []error) {
179 // Get the result of the validate call
180 raw := p.Shared.Validate.Value(c)
181 if raw == nil {
182 return nil, nil
183 }
184
185 result, ok := raw.(*shadowResourceProvisionerValidate)
186 if !ok {
187 p.ErrorLock.Lock()
188 defer p.ErrorLock.Unlock()
189 p.Error = multierror.Append(p.Error, fmt.Errorf(
190 "Unknown 'validate' shadow value: %#v", raw))
191 return nil, nil
192 }
193
194 // We don't need to compare configurations because we key on the
195	// configuration, so just return right away.
196 return result.ResultWarn, result.ResultErr
197}
198
199func (p *shadowResourceProvisionerShadow) Apply(
200 output UIOutput, s *InstanceState, c *ResourceConfig) error {
201 // Get the value based on the key
202 key := s.ID
203 raw := p.Shared.Apply.Value(key)
204 if raw == nil {
205 return nil
206 }
207
208 compareVal, ok := raw.(*shadow.ComparedValue)
209 if !ok {
210 p.ErrorLock.Lock()
211 defer p.ErrorLock.Unlock()
212 p.Error = multierror.Append(p.Error, fmt.Errorf(
213 "Unknown 'apply' shadow value: %#v", raw))
214 return nil
215 }
216
217 // With the compared value, we compare against our config
218 raw = compareVal.Value(c)
219 if raw == nil {
220 return nil
221 }
222
223 result, ok := raw.(*shadowResourceProvisionerApply)
224 if !ok {
225 p.ErrorLock.Lock()
226 defer p.ErrorLock.Unlock()
227 p.Error = multierror.Append(p.Error, fmt.Errorf(
228 "Unknown 'apply' shadow value: %#v", raw))
229 return nil
230 }
231
232 return result.ResultErr
233}
234
235func (p *shadowResourceProvisionerShadow) Stop() error {
236 // For the shadow, we always just return nil since a Stop indicates
237 // that we were interrupted and shadows are disabled during interrupts
238	// anyway.
239 return nil
240}
241
242// The structs for the various function calls are put below. These structs
243// are used to carry call information across the real/shadow boundaries.
244
245type shadowResourceProvisionerValidate struct {
246 Config *ResourceConfig
247 ResultWarn []string
248 ResultErr []error
249}
250
251type shadowResourceProvisionerApply struct {
252 Config *ResourceConfig
253 ResultErr error
254}
255
256func shadowResourceProvisionerValidateCompare(k, v interface{}) bool {
257 c, ok := k.(*ResourceConfig)
258 if !ok {
259 return false
260 }
261
262 result, ok := v.(*shadowResourceProvisionerValidate)
263 if !ok {
264 return false
265 }
266
267 return c.Equal(result.Config)
268}
269
270func shadowResourceProvisionerApplyCompare(k, v interface{}) bool {
271 c, ok := k.(*ResourceConfig)
272 if !ok {
273 return false
274 }
275
276 result, ok := v.(*shadowResourceProvisionerApply)
277 if !ok {
278 return false
279 }
280
281 return c.Equal(result.Config)
282}
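
Unlike the provider, the provisioner keys recorded calls by configuration equality rather than strictly by call order, using shadow.ComparedValue with the two compare functions above. A sketch of that matching rule in isolation, mirroring what the real and shadow Validate methods do:

func matchValidate(c *ResourceConfig, warns []string, errs []error) ([]string, []error) {
	// The compare func pairs a lookup key (a *ResourceConfig) with any
	// recorded call whose Config is Equal to it.
	v := shadow.ComparedValue{Func: shadowResourceProvisionerValidateCompare}

	// Real side records its call...
	v.SetValue(&shadowResourceProvisionerValidate{
		Config:     c,
		ResultWarn: warns,
		ResultErr:  errs,
	})

	// ...and the shadow side retrieves it by presenting an equal config.
	result := v.Value(c).(*shadowResourceProvisionerValidate)
	return result.ResultWarn, result.ResultErr
}
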
diff --git a/vendor/github.com/hashicorp/terraform/terraform/state.go b/vendor/github.com/hashicorp/terraform/terraform/state.go
new file mode 100644
index 0000000..074b682
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/state.go
@@ -0,0 +1,2118 @@
1package terraform
2
3import (
4 "bufio"
5 "bytes"
6 "encoding/json"
7 "errors"
8 "fmt"
9 "io"
10 "io/ioutil"
11 "log"
12 "reflect"
13 "sort"
14 "strconv"
15 "strings"
16 "sync"
17
18 "github.com/hashicorp/go-multierror"
19 "github.com/hashicorp/go-version"
20 "github.com/hashicorp/terraform/config"
21 "github.com/mitchellh/copystructure"
22 "github.com/satori/go.uuid"
23)
24
25const (
26 // StateVersion is the current version for our state file
27 StateVersion = 3
28)
29
30// rootModulePath is the path of the root module
31var rootModulePath = []string{"root"}
32
33// normalizeModulePath takes a raw module path and returns a path that
34// has the rootModulePath prepended to it. If I could go back in time I
35// would've never had a rootModulePath (empty path would be root). We can
36// still fix this but that's a big refactor that doesn't make sense for
37// my branch. Instead, this function normalizes paths.
38func normalizeModulePath(p []string) []string {
39 k := len(rootModulePath)
40
41 // If we already have a root module prefix, we're done
42 if len(p) >= len(rootModulePath) {
43 if reflect.DeepEqual(p[:k], rootModulePath) {
44 return p
45 }
46 }
47
48 // None? Prefix it
49 result := make([]string, len(rootModulePath)+len(p))
50 copy(result, rootModulePath)
51 copy(result[k:], p)
52 return result
53}
54
55// State keeps track of a snapshot state-of-the-world that Terraform
56// can use to keep track of what real world resources it is actually
57// managing.
58type State struct {
59 // Version is the state file protocol version.
60 Version int `json:"version"`
61
62 // TFVersion is the version of Terraform that wrote this state.
63 TFVersion string `json:"terraform_version,omitempty"`
64
65 // Serial is incremented on any operation that modifies
66 // the State file. It is used to detect potentially conflicting
67 // updates.
68 Serial int64 `json:"serial"`
69
70 // Lineage is set when a new, blank state is created and then
71 // never updated. This allows us to determine whether the serials
72 // of two states can be meaningfully compared.
73 // Apart from the guarantee that collisions between two lineages
74 // are very unlikely, this value is opaque and external callers
75 // should only compare lineage strings byte-for-byte for equality.
76 Lineage string `json:"lineage"`
77
78 // Remote is used to track the metadata required to
79 // pull and push state files from a remote storage endpoint.
80 Remote *RemoteState `json:"remote,omitempty"`
81
82 // Backend tracks the configuration for the backend in use with
83 // this state. This is used to track any changes in the backend
84 // configuration.
85 Backend *BackendState `json:"backend,omitempty"`
86
87 // Modules contains all the modules in a breadth-first order
88 Modules []*ModuleState `json:"modules"`
89
90 mu sync.Mutex
91}
92
93func (s *State) Lock() { s.mu.Lock() }
94func (s *State) Unlock() { s.mu.Unlock() }
95
96// NewState is used to initialize a blank state
97func NewState() *State {
98 s := &State{}
99 s.init()
100 return s
101}
102
103// Children returns the ModuleStates that are direct children of
104// the given path. If the path is "root", for example, then children
105// returned might be "root.child", but not "root.child.grandchild".
106func (s *State) Children(path []string) []*ModuleState {
107 s.Lock()
108 defer s.Unlock()
109 // TODO: test
110
111 return s.children(path)
112}
113
114func (s *State) children(path []string) []*ModuleState {
115 result := make([]*ModuleState, 0)
116 for _, m := range s.Modules {
117 if m == nil {
118 continue
119 }
120
121 if len(m.Path) != len(path)+1 {
122 continue
123 }
124 if !reflect.DeepEqual(path, m.Path[:len(path)]) {
125 continue
126 }
127
128 result = append(result, m)
129 }
130
131 return result
132}
133
134// AddModule adds the module with the given path to the state.
135//
136// This should be the preferred method to add module states since it
137// allows us to optimize lookups later as well as control sorting.
138func (s *State) AddModule(path []string) *ModuleState {
139 s.Lock()
140 defer s.Unlock()
141
142 return s.addModule(path)
143}
144
145func (s *State) addModule(path []string) *ModuleState {
146 // check if the module exists first
147 m := s.moduleByPath(path)
148 if m != nil {
149 return m
150 }
151
152 m = &ModuleState{Path: path}
153 m.init()
154 s.Modules = append(s.Modules, m)
155 s.sort()
156 return m
157}
158
159// ModuleByPath is used to lookup the module state for the given path.
160// This should be the preferred lookup mechanism as it allows for future
161// lookup optimizations.
162func (s *State) ModuleByPath(path []string) *ModuleState {
163 if s == nil {
164 return nil
165 }
166 s.Lock()
167 defer s.Unlock()
168
169 return s.moduleByPath(path)
170}
171
172func (s *State) moduleByPath(path []string) *ModuleState {
173 for _, mod := range s.Modules {
174 if mod == nil {
175 continue
176 }
177 if mod.Path == nil {
178 panic("missing module path")
179 }
180 if reflect.DeepEqual(mod.Path, path) {
181 return mod
182 }
183 }
184 return nil
185}
186
187// ModuleOrphans returns all the module orphans in this state by
188// returning their full paths. These paths can be used with ModuleByPath
189// to return the actual state.
190func (s *State) ModuleOrphans(path []string, c *config.Config) [][]string {
191 s.Lock()
192 defer s.Unlock()
193
194 return s.moduleOrphans(path, c)
195
196}
197
198func (s *State) moduleOrphans(path []string, c *config.Config) [][]string {
199 // direct keeps track of what direct children we have both in our config
200 // and in our state. childrenKeys keeps track of what isn't an orphan.
201 direct := make(map[string]struct{})
202 childrenKeys := make(map[string]struct{})
203 if c != nil {
204 for _, m := range c.Modules {
205 childrenKeys[m.Name] = struct{}{}
206 direct[m.Name] = struct{}{}
207 }
208 }
209
210 // Go over the direct children and find any that aren't in our keys.
211 var orphans [][]string
212 for _, m := range s.children(path) {
213 key := m.Path[len(m.Path)-1]
214
215 // Record that we found this key as a direct child. We use this
216 // later to find orphan nested modules.
217 direct[key] = struct{}{}
218
219 // If we have a direct child still in our config, it is not an orphan
220 if _, ok := childrenKeys[key]; ok {
221 continue
222 }
223
224 orphans = append(orphans, m.Path)
225 }
226
227 // Find the orphans that are nested...
228 for _, m := range s.Modules {
229 if m == nil {
230 continue
231 }
232
233 // We only want modules that are at least grandchildren
234 if len(m.Path) < len(path)+2 {
235 continue
236 }
237
238 // If it isn't part of our tree, continue
239 if !reflect.DeepEqual(path, m.Path[:len(path)]) {
240 continue
241 }
242
243 // If we have the direct child, then just skip it.
244 key := m.Path[len(path)]
245 if _, ok := direct[key]; ok {
246 continue
247 }
248
249 orphanPath := m.Path[:len(path)+1]
250
251 // Don't double-add if we've already added this orphan (which can happen if
252 // there are multiple nested sub-modules that get orphaned together).
253 alreadyAdded := false
254 for _, o := range orphans {
255 if reflect.DeepEqual(o, orphanPath) {
256 alreadyAdded = true
257 break
258 }
259 }
260 if alreadyAdded {
261 continue
262 }
263
264 // Add this orphan
265 orphans = append(orphans, orphanPath)
266 }
267
268 return orphans
269}
270
271// Empty returns true if the state is empty.
272func (s *State) Empty() bool {
273 if s == nil {
274 return true
275 }
276 s.Lock()
277 defer s.Unlock()
278
279 return len(s.Modules) == 0
280}
281
282// HasResources returns true if the state contains any resources.
283//
284// This is similar to !s.Empty, but returns true also in the case where the
285// state has modules but all of them are devoid of resources.
286func (s *State) HasResources() bool {
287 if s.Empty() {
288 return false
289 }
290
291 for _, mod := range s.Modules {
292 if len(mod.Resources) > 0 {
293 return true
294 }
295 }
296
297 return false
298}
299
300// IsRemote returns true if State represents a state that exists and is
301// remote.
302func (s *State) IsRemote() bool {
303 if s == nil {
304 return false
305 }
306 s.Lock()
307 defer s.Unlock()
308
309 if s.Remote == nil {
310 return false
311 }
312 if s.Remote.Type == "" {
313 return false
314 }
315
316 return true
317}
318
319// Validate validates the integrity of this state file.
320//
321// Terraform expects certain properties of the statefile in order
322// to behave properly. The core of Terraform will assume that once it
323// receives a State structure, it has been validated. This validation
324// check should be called to ensure that.
325//
326// If this returns an error, then the user should be notified. The error
327// response will include detailed information on the nature of the error.
328func (s *State) Validate() error {
329 s.Lock()
330 defer s.Unlock()
331
332 var result error
333
334 // !!!! FOR DEVELOPERS !!!!
335 //
336 // Any errors returned from this Validate function will BLOCK TERRAFORM
337 // from loading a state file. Therefore, this should only contain checks
338 // that are only resolvable through manual intervention.
339 //
340 // !!!! FOR DEVELOPERS !!!!
341
342 // Make sure there are no duplicate module states. We open a new
343 // block here so we can use basic variable names and future validations
344 // can do the same.
345 {
346 found := make(map[string]struct{})
347 for _, ms := range s.Modules {
348 if ms == nil {
349 continue
350 }
351
352 key := strings.Join(ms.Path, ".")
353 if _, ok := found[key]; ok {
354 result = multierror.Append(result, fmt.Errorf(
355 strings.TrimSpace(stateValidateErrMultiModule), key))
356 continue
357 }
358
359 found[key] = struct{}{}
360 }
361 }
362
363 return result
364}
365
366// Remove removes the item in the state at the given address, returning
367// any errors that may have occurred.
368//
369// If the address references a module state or resource, it will delete
370// all children as well. To check what will be deleted, use a StateFilter
371// first.
372func (s *State) Remove(addr ...string) error {
373 s.Lock()
374 defer s.Unlock()
375
376 // Filter out what we need to delete
377 filter := &StateFilter{State: s}
378 results, err := filter.Filter(addr...)
379 if err != nil {
380 return err
381 }
382
383 // If we have no results, just exit early, we're not going to do anything.
384 // While what happens below is fairly fast, this is an important early
385 // exit since the prune below might modify the state more and we don't
386 // want to modify the state if we don't have to.
387 if len(results) == 0 {
388 return nil
389 }
390
391 // Go through each result and grab what we need
392 removed := make(map[interface{}]struct{})
393 for _, r := range results {
394 // Convert the path to our own type
395 path := append([]string{"root"}, r.Path...)
396
397 // If we removed this already, then ignore
398 if _, ok := removed[r.Value]; ok {
399 continue
400 }
401
402 // If we removed the parent already, then ignore
403 if r.Parent != nil {
404 if _, ok := removed[r.Parent.Value]; ok {
405 continue
406 }
407 }
408
409 // Add this to the removed list
410 removed[r.Value] = struct{}{}
411
412 switch v := r.Value.(type) {
413 case *ModuleState:
414 s.removeModule(path, v)
415 case *ResourceState:
416 s.removeResource(path, v)
417 case *InstanceState:
418 s.removeInstance(path, r.Parent.Value.(*ResourceState), v)
419 default:
420 return fmt.Errorf("unknown type to delete: %T", r.Value)
421 }
422 }
423
424 // Prune since the removal functions often do the bare minimum to
425 // remove a thing and may leave around dangling empty modules, resources,
426 // etc. Prune will clean that all up.
427 s.prune()
428
429 return nil
430}
431
432func (s *State) removeModule(path []string, v *ModuleState) {
433 for i, m := range s.Modules {
434 if m == v {
435 s.Modules, s.Modules[len(s.Modules)-1] = append(s.Modules[:i], s.Modules[i+1:]...), nil
436 return
437 }
438 }
439}
440
441func (s *State) removeResource(path []string, v *ResourceState) {
442 // Get the module this resource lives in. If it doesn't exist, we're done.
443 mod := s.moduleByPath(path)
444 if mod == nil {
445 return
446 }
447
448	// Find this resource. This is an O(N) lookup, whereas if we had the key
449	// it could be O(1), but even with thousands of resources this shouldn't
450	// matter right now. We can easily improve performance here when the time comes.
451 for k, r := range mod.Resources {
452 if r == v {
453 // Found it
454 delete(mod.Resources, k)
455 return
456 }
457 }
458}
459
460func (s *State) removeInstance(path []string, r *ResourceState, v *InstanceState) {
461 // Go through the resource and find the instance that matches this
462 // (if any) and remove it.
463
464 // Check primary
465 if r.Primary == v {
466 r.Primary = nil
467 return
468 }
469
470 // Check lists
471 lists := [][]*InstanceState{r.Deposed}
472 for _, is := range lists {
473 for i, instance := range is {
474 if instance == v {
475 // Found it, remove it
476 is, is[len(is)-1] = append(is[:i], is[i+1:]...), nil
477
478 // Done
479 return
480 }
481 }
482 }
483}
484
485// RootModule returns the ModuleState for the root module
486func (s *State) RootModule() *ModuleState {
487 root := s.ModuleByPath(rootModulePath)
488 if root == nil {
489 panic("missing root module")
490 }
491 return root
492}
493
494// Equal tests if one state is equal to another.
495func (s *State) Equal(other *State) bool {
496 // If one is nil, we do a direct check
497 if s == nil || other == nil {
498 return s == other
499 }
500
501 s.Lock()
502 defer s.Unlock()
503 return s.equal(other)
504}
505
506func (s *State) equal(other *State) bool {
507 if s == nil || other == nil {
508 return s == other
509 }
510
511 // If the versions are different, they're certainly not equal
512 if s.Version != other.Version {
513 return false
514 }
515
516 // If any of the modules are not equal, then this state isn't equal
517 if len(s.Modules) != len(other.Modules) {
518 return false
519 }
520 for _, m := range s.Modules {
521 // This isn't very optimal currently but works.
522 otherM := other.moduleByPath(m.Path)
523 if otherM == nil {
524 return false
525 }
526
527 // If they're not equal, then we're not equal!
528 if !m.Equal(otherM) {
529 return false
530 }
531 }
532
533 return true
534}
535
536type StateAgeComparison int
537
538const (
539 StateAgeEqual StateAgeComparison = 0
540 StateAgeReceiverNewer StateAgeComparison = 1
541 StateAgeReceiverOlder StateAgeComparison = -1
542)
543
544// CompareAges compares one state with another to determine which is "older".
545//
546// This is a simple check using the state's serial, and is thus only as
547// reliable as the serial itself. In the normal case, only one state
548// exists for a given combination of lineage/serial, but Terraform
549// does not guarantee this and so the result of this method should be
550// used with care.
551//
552// Returns an integer that is negative if the receiver is older than
553// the argument, positive if the converse, and zero if they are equal.
554// An error is returned if the two states are not of the same lineage,
555// in which case the integer returned has no meaning.
556func (s *State) CompareAges(other *State) (StateAgeComparison, error) {
557 // nil states are "older" than actual states
558 switch {
559 case s != nil && other == nil:
560 return StateAgeReceiverNewer, nil
561 case s == nil && other != nil:
562 return StateAgeReceiverOlder, nil
563 case s == nil && other == nil:
564 return StateAgeEqual, nil
565 }
566
567 if !s.SameLineage(other) {
568 return StateAgeEqual, fmt.Errorf(
569 "can't compare two states of differing lineage",
570 )
571 }
572
573 s.Lock()
574 defer s.Unlock()
575
576 switch {
577 case s.Serial < other.Serial:
578 return StateAgeReceiverOlder, nil
579 case s.Serial > other.Serial:
580 return StateAgeReceiverNewer, nil
581 default:
582 return StateAgeEqual, nil
583 }
584}
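
// Illustrative usage (an editor's sketch, not part of the upstream
// source; the lineage and serial values are hypothetical):
//
//	a := &State{Lineage: "4f4b2c6a", Serial: 4}
//	b := &State{Lineage: "4f4b2c6a", Serial: 7}
//	cmp, err := a.CompareAges(b)
//	// cmp == StateAgeReceiverOlder, err == nil
//
// Comparing states of differing lineage returns an error instead, since
// their serials are not comparable.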
585
586// SameLineage returns true only if the state given in argument belongs
587// to the same "lineage" of states as the receiver.
588func (s *State) SameLineage(other *State) bool {
589 s.Lock()
590 defer s.Unlock()
591
592 // If one of the states has no lineage then it is assumed to predate
593 // this concept, and so we'll accept it as belonging to any lineage
594 // so that a lineage string can be assigned to newer versions
595 // without breaking compatibility with older versions.
596 if s.Lineage == "" || other.Lineage == "" {
597 return true
598 }
599
600 return s.Lineage == other.Lineage
601}
602
603// DeepCopy performs a deep copy of the state structure and returns
604// a new structure.
605func (s *State) DeepCopy() *State {
606 stateCopy, err := copystructure.Config{Lock: true}.Copy(s)
607 if err != nil {
608 panic(err)
609 }
610
611 return stateCopy.(*State)
612}
613
614// IncrementSerialMaybe increments the serial number of this state
615 // if it is different from the other state.
616func (s *State) IncrementSerialMaybe(other *State) {
617 if s == nil {
618 return
619 }
620 if other == nil {
621 return
622 }
623 s.Lock()
624 defer s.Unlock()
625
626 if s.Serial > other.Serial {
627 return
628 }
629 if other.TFVersion != s.TFVersion || !s.equal(other) {
630 if other.Serial > s.Serial {
631 s.Serial = other.Serial
632 }
633
634 s.Serial++
635 }
636}
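
// Illustrative usage (an editor's sketch, not part of the upstream
// source). Suppose the local and remote states share a lineage but
// their contents differ, with Serial 3 locally and 5 remotely:
//
//	local.IncrementSerialMaybe(remote)
//	// local.Serial is first raised to 5 (the remote serial), then
//	// incremented to 6 so the next write supersedes the remote copy.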
637
638// FromFutureTerraform checks if this state was written by a Terraform
639// version from the future.
640func (s *State) FromFutureTerraform() bool {
641 s.Lock()
642 defer s.Unlock()
643
644 // No TF version means it is certainly from the past
645 if s.TFVersion == "" {
646 return false
647 }
648
649 v := version.Must(version.NewVersion(s.TFVersion))
650 return SemVersion.LessThan(v)
651}
652
653func (s *State) Init() {
654 s.Lock()
655 defer s.Unlock()
656 s.init()
657}
658
659func (s *State) init() {
660 if s.Version == 0 {
661 s.Version = StateVersion
662 }
663 if s.moduleByPath(rootModulePath) == nil {
664 s.addModule(rootModulePath)
665 }
666 s.ensureHasLineage()
667
668 for _, mod := range s.Modules {
669 if mod != nil {
670 mod.init()
671 }
672 }
673
674 if s.Remote != nil {
675 s.Remote.init()
676 }
677
678}
679
680func (s *State) EnsureHasLineage() {
681 s.Lock()
682 defer s.Unlock()
683
684 s.ensureHasLineage()
685}
686
687func (s *State) ensureHasLineage() {
688 if s.Lineage == "" {
689 s.Lineage = uuid.NewV4().String()
690 log.Printf("[DEBUG] New state was assigned lineage %q\n", s.Lineage)
691 } else {
692 log.Printf("[TRACE] Preserving existing state lineage %q\n", s.Lineage)
693 }
694}
695
696 // AddModuleState inserts this module state, overriding any existing ModuleState with the same path
697func (s *State) AddModuleState(mod *ModuleState) {
698 mod.init()
699 s.Lock()
700 defer s.Unlock()
701
702 s.addModuleState(mod)
703}
704
705func (s *State) addModuleState(mod *ModuleState) {
706 for i, m := range s.Modules {
707 if reflect.DeepEqual(m.Path, mod.Path) {
708 s.Modules[i] = mod
709 return
710 }
711 }
712
713 s.Modules = append(s.Modules, mod)
714 s.sort()
715}
716
717// prune is used to remove any resources that are no longer required
718func (s *State) prune() {
719 if s == nil {
720 return
721 }
722
723 // Filter out empty modules.
724 // A module is always assumed to have a path, and its length isn't always
725 // bounds checked later on. Modules may be "emptied" during destroy, but we
726 // never want to store those in the state.
727 for i := 0; i < len(s.Modules); i++ {
728 if s.Modules[i] == nil || len(s.Modules[i].Path) == 0 {
729 s.Modules = append(s.Modules[:i], s.Modules[i+1:]...)
730 i--
731 }
732 }
733
734 for _, mod := range s.Modules {
735 mod.prune()
736 }
737 if s.Remote != nil && s.Remote.Empty() {
738 s.Remote = nil
739 }
740}
741
742// sort sorts the modules
743func (s *State) sort() {
744 sort.Sort(moduleStateSort(s.Modules))
745
746 // Sort the contents of each module as well
747 for _, m := range s.Modules {
748 if m != nil {
749 m.sort()
750 }
751 }
752}
753
754func (s *State) String() string {
755 if s == nil {
756 return "<nil>"
757 }
758 s.Lock()
759 defer s.Unlock()
760
761 var buf bytes.Buffer
762 for _, m := range s.Modules {
763 mStr := m.String()
764
765 // If we're the root module, we just write the output directly.
766 if reflect.DeepEqual(m.Path, rootModulePath) {
767 buf.WriteString(mStr + "\n")
768 continue
769 }
770
771 buf.WriteString(fmt.Sprintf("module.%s:\n", strings.Join(m.Path[1:], ".")))
772
773 s := bufio.NewScanner(strings.NewReader(mStr))
774 for s.Scan() {
775 text := s.Text()
776 if text != "" {
777 text = " " + text
778 }
779
780 buf.WriteString(fmt.Sprintf("%s\n", text))
781 }
782 }
783
784 return strings.TrimSpace(buf.String())
785}
786
787// BackendState stores the configuration to connect to a remote backend.
788type BackendState struct {
789 Type string `json:"type"` // Backend type
790 Config map[string]interface{} `json:"config"` // Backend raw config
791
792 // Hash is the hash code to uniquely identify the original source
793 // configuration. We use this to detect when there is a change in
794 // configuration even when "type" isn't changed.
795 Hash uint64 `json:"hash"`
796}
797
798// Empty returns true if BackendState has no state.
799func (s *BackendState) Empty() bool {
800 return s == nil || s.Type == ""
801}
802
803// Rehash returns a unique content hash for this backend's configuration
804// as a uint64 value.
805// The Hash stored in the backend state needs to match the config itself, but
806// we need to compare the backend config after it has been combined with all
807// options.
808// This function must match the implementation used by config.Backend.
809func (s *BackendState) Rehash() uint64 {
810 if s == nil {
811 return 0
812 }
813
814 cfg := config.Backend{
815 Type: s.Type,
816 RawConfig: &config.RawConfig{
817 Raw: s.Config,
818 },
819 }
820
821 return cfg.Rehash()
822}
823
824// RemoteState is used to track the information about a remote
825// state store that we push/pull state to.
826type RemoteState struct {
827 // Type controls the client we use for the remote state
828 Type string `json:"type"`
829
830 // Config is used to store arbitrary configuration that
831 // is type specific
832 Config map[string]string `json:"config"`
833
834 mu sync.Mutex
835}
836
837func (s *RemoteState) Lock() { s.mu.Lock() }
838func (s *RemoteState) Unlock() { s.mu.Unlock() }
839
840func (r *RemoteState) init() {
841 r.Lock()
842 defer r.Unlock()
843
844 if r.Config == nil {
845 r.Config = make(map[string]string)
846 }
847}
848
849func (r *RemoteState) deepcopy() *RemoteState {
850 r.Lock()
851 defer r.Unlock()
852
853 confCopy := make(map[string]string, len(r.Config))
854 for k, v := range r.Config {
855 confCopy[k] = v
856 }
857 return &RemoteState{
858 Type: r.Type,
859 Config: confCopy,
860 }
861}
862
863func (r *RemoteState) Empty() bool {
864 if r == nil {
865 return true
866 }
867 r.Lock()
868 defer r.Unlock()
869
870 return r.Type == ""
871}
872
873func (r *RemoteState) Equals(other *RemoteState) bool {
874 r.Lock()
875 defer r.Unlock()
876
877 if r.Type != other.Type {
878 return false
879 }
880 if len(r.Config) != len(other.Config) {
881 return false
882 }
883 for k, v := range r.Config {
884 if other.Config[k] != v {
885 return false
886 }
887 }
888 return true
889}
890
891// OutputState is used to track the state relevant to a single output.
892type OutputState struct {
893 // Sensitive describes whether the output is considered sensitive,
894 // which may lead to masking the value on screen in some cases.
895 Sensitive bool `json:"sensitive"`
896 // Type describes the structure of Value. Valid values are "string",
897 // "map" and "list"
898 Type string `json:"type"`
899 // Value contains the value of the output, in the structure described
900 // by the Type field.
901 Value interface{} `json:"value"`
902
903 mu sync.Mutex
904}
905
906func (s *OutputState) Lock() { s.mu.Lock() }
907func (s *OutputState) Unlock() { s.mu.Unlock() }
908
909func (s *OutputState) String() string {
910 return fmt.Sprintf("%#v", s.Value)
911}
912
913// Equal compares two OutputState structures for equality. nil values are
914// considered equal.
915func (s *OutputState) Equal(other *OutputState) bool {
916 if s == nil && other == nil {
917 return true
918 }
919
920 if s == nil || other == nil {
921 return false
922 }
923 s.Lock()
924 defer s.Unlock()
925
926 if s.Type != other.Type {
927 return false
928 }
929
930 if s.Sensitive != other.Sensitive {
931 return false
932 }
933
934 if !reflect.DeepEqual(s.Value, other.Value) {
935 return false
936 }
937
938 return true
939}
940
941func (s *OutputState) deepcopy() *OutputState {
942 if s == nil {
943 return nil
944 }
945
946 stateCopy, err := copystructure.Config{Lock: true}.Copy(s)
947 if err != nil {
948 panic(fmt.Errorf("Error copying output value: %s", err))
949 }
950
951 return stateCopy.(*OutputState)
952}
953
954// ModuleState is used to track all the state relevant to a single
955// module. Previous to Terraform 0.3, all state belonged to the "root"
956// module.
957type ModuleState struct {
958 // Path is the import path from the root module. Module imports are
959 // always disjoint, so the path represents a module tree.
960 Path []string `json:"path"`
961
962 // Outputs declared by the module and maintained for each module
963 // even though only the root module technically needs to be kept.
964 // This allows operators to inspect values at the boundaries.
965 Outputs map[string]*OutputState `json:"outputs"`
966
967 // Resources is a mapping of the logically named resource to
968 // the state of the resource. Each resource may actually have
969 // N instances underneath, although a user only needs to think
970 // about the 1:1 case.
971 Resources map[string]*ResourceState `json:"resources"`
972
973 // Dependencies are a list of things that this module relies on
974 // existing to remain intact. For example: a module may depend
975 // on a VPC ID given by an aws_vpc resource.
976 //
977 // Terraform uses this information to build valid destruction
978 // orders and to warn the user if they're destroying a module that
979 // another resource depends on.
980 //
981 // Things can be put into this list that may not be managed by
982 // Terraform. If Terraform doesn't find a matching ID in the
983 // overall state, then it assumes it isn't managed and doesn't
984 // worry about it.
985 Dependencies []string `json:"depends_on"`
986
987 mu sync.Mutex
988}
989
990func (s *ModuleState) Lock() { s.mu.Lock() }
991func (s *ModuleState) Unlock() { s.mu.Unlock() }
992
993// Equal tests whether one module state is equal to another.
994func (m *ModuleState) Equal(other *ModuleState) bool {
995 m.Lock()
996 defer m.Unlock()
997
998 // Paths must be equal
999 if !reflect.DeepEqual(m.Path, other.Path) {
1000 return false
1001 }
1002
1003 // Outputs must be equal
1004 if len(m.Outputs) != len(other.Outputs) {
1005 return false
1006 }
1007 for k, v := range m.Outputs {
1008 if !other.Outputs[k].Equal(v) {
1009 return false
1010 }
1011 }
1012
1013 // Dependencies must be equal. This sorts these in place but
1014 // this shouldn't cause any problems.
1015 sort.Strings(m.Dependencies)
1016 sort.Strings(other.Dependencies)
1017 if len(m.Dependencies) != len(other.Dependencies) {
1018 return false
1019 }
1020 for i, d := range m.Dependencies {
1021 if other.Dependencies[i] != d {
1022 return false
1023 }
1024 }
1025
1026 // Resources must be equal
1027 if len(m.Resources) != len(other.Resources) {
1028 return false
1029 }
1030 for k, r := range m.Resources {
1031 otherR, ok := other.Resources[k]
1032 if !ok {
1033 return false
1034 }
1035
1036 if !r.Equal(otherR) {
1037 return false
1038 }
1039 }
1040
1041 return true
1042}
1043
1044 // IsRoot says whether or not this module state is for the root module.
1045func (m *ModuleState) IsRoot() bool {
1046 m.Lock()
1047 defer m.Unlock()
1048 return reflect.DeepEqual(m.Path, rootModulePath)
1049}
1050
1051// IsDescendent returns true if other is a descendent of this module.
1052func (m *ModuleState) IsDescendent(other *ModuleState) bool {
1053 m.Lock()
1054 defer m.Unlock()
1055
1056 i := len(m.Path)
1057 return len(other.Path) > i && reflect.DeepEqual(other.Path[:i], m.Path)
1058}
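
// Illustrative usage (an editor's sketch, not part of the upstream
// source; the module path is hypothetical):
//
//	root := &ModuleState{Path: []string{"root"}}
//	child := &ModuleState{Path: []string{"root", "networking"}}
//	root.IsDescendent(child) // true
//	child.IsDescendent(root) // false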
1059
1060// Orphans returns a list of keys of resources that are in the State
1061// but aren't present in the configuration itself. Hence, these keys
1062// represent the state of resources that are orphans.
1063func (m *ModuleState) Orphans(c *config.Config) []string {
1064 m.Lock()
1065 defer m.Unlock()
1066
1067 keys := make(map[string]struct{})
1068 for k := range m.Resources {
1069 keys[k] = struct{}{}
1070 }
1071
1072 if c != nil {
1073 for _, r := range c.Resources {
1074 delete(keys, r.Id())
1075
1076 for k := range keys {
1077 if strings.HasPrefix(k, r.Id()+".") {
1078 delete(keys, k)
1079 }
1080 }
1081 }
1082 }
1083
1084 result := make([]string, 0, len(keys))
1085 for k := range keys {
1086 result = append(result, k)
1087 }
1088
1089 return result
1090}
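
// Illustrative usage (an editor's sketch, not part of the upstream
// source; resource names are hypothetical). If the state tracks
// "aws_instance.web" and "aws_instance.db.0" but the configuration only
// declares a resource whose Id() is "aws_instance.web":
//
//	orphans := m.Orphans(cfg) // []string{"aws_instance.db.0"}
//
// Indexed instances such as "aws_instance.web.1" share the configured
// "aws_instance.web." prefix and so are not reported as orphans.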
1091
1092// View returns a view with the given resource prefix.
1093func (m *ModuleState) View(id string) *ModuleState {
1094 if m == nil {
1095 return m
1096 }
1097
1098 r := m.deepcopy()
1099 for k := range r.Resources {
1100 if id == k || strings.HasPrefix(k, id+".") {
1101 continue
1102 }
1103
1104 delete(r.Resources, k)
1105 }
1106
1107 return r
1108}
1109
1110func (m *ModuleState) init() {
1111 m.Lock()
1112 defer m.Unlock()
1113
1114 if m.Path == nil {
1115 m.Path = []string{}
1116 }
1117 if m.Outputs == nil {
1118 m.Outputs = make(map[string]*OutputState)
1119 }
1120 if m.Resources == nil {
1121 m.Resources = make(map[string]*ResourceState)
1122 }
1123
1124 if m.Dependencies == nil {
1125 m.Dependencies = make([]string, 0)
1126 }
1127
1128 for _, rs := range m.Resources {
1129 rs.init()
1130 }
1131}
1132
1133func (m *ModuleState) deepcopy() *ModuleState {
1134 if m == nil {
1135 return nil
1136 }
1137
1138 stateCopy, err := copystructure.Config{Lock: true}.Copy(m)
1139 if err != nil {
1140 panic(err)
1141 }
1142
1143 return stateCopy.(*ModuleState)
1144}
1145
1146// prune is used to remove any resources that are no longer required
1147func (m *ModuleState) prune() {
1148 m.Lock()
1149 defer m.Unlock()
1150
1151 for k, v := range m.Resources {
1152 if v == nil || ((v.Primary == nil || v.Primary.ID == "") && len(v.Deposed) == 0) {
1153 delete(m.Resources, k)
1154 continue
1155 }
1156
1157 v.prune()
1158 }
1159
1160 for k, v := range m.Outputs {
1161 if v.Value == config.UnknownVariableValue {
1162 delete(m.Outputs, k)
1163 }
1164 }
1165
1166 m.Dependencies = uniqueStrings(m.Dependencies)
1167}
1168
1169func (m *ModuleState) sort() {
1170 for _, v := range m.Resources {
1171 v.sort()
1172 }
1173}
1174
1175func (m *ModuleState) String() string {
1176 m.Lock()
1177 defer m.Unlock()
1178
1179 var buf bytes.Buffer
1180
1181 if len(m.Resources) == 0 {
1182 buf.WriteString("<no state>")
1183 }
1184
1185 names := make([]string, 0, len(m.Resources))
1186 for name := range m.Resources {
1187 names = append(names, name)
1188 }
1189
1190 sort.Sort(resourceNameSort(names))
1191
1192 for _, k := range names {
1193 rs := m.Resources[k]
1194 var id string
1195 if rs.Primary != nil {
1196 id = rs.Primary.ID
1197 }
1198 if id == "" {
1199 id = "<not created>"
1200 }
1201
1202 taintStr := ""
1203 if rs.Primary != nil && rs.Primary.Tainted {
1204 taintStr = " (tainted)"
1205 }
1206
1207 deposedStr := ""
1208 if len(rs.Deposed) > 0 {
1209 deposedStr = fmt.Sprintf(" (%d deposed)", len(rs.Deposed))
1210 }
1211
1212 buf.WriteString(fmt.Sprintf("%s:%s%s\n", k, taintStr, deposedStr))
1213 buf.WriteString(fmt.Sprintf(" ID = %s\n", id))
1214 if rs.Provider != "" {
1215 buf.WriteString(fmt.Sprintf(" provider = %s\n", rs.Provider))
1216 }
1217
1218 var attributes map[string]string
1219 if rs.Primary != nil {
1220 attributes = rs.Primary.Attributes
1221 }
1222 attrKeys := make([]string, 0, len(attributes))
1223 for ak := range attributes {
1224 if ak == "id" {
1225 continue
1226 }
1227
1228 attrKeys = append(attrKeys, ak)
1229 }
1230
1231 sort.Strings(attrKeys)
1232
1233 for _, ak := range attrKeys {
1234 av := attributes[ak]
1235 buf.WriteString(fmt.Sprintf(" %s = %s\n", ak, av))
1236 }
1237
1238 for idx, t := range rs.Deposed {
1239 taintStr := ""
1240 if t.Tainted {
1241 taintStr = " (tainted)"
1242 }
1243 buf.WriteString(fmt.Sprintf(" Deposed ID %d = %s%s\n", idx+1, t.ID, taintStr))
1244 }
1245
1246 if len(rs.Dependencies) > 0 {
1247 buf.WriteString("\n Dependencies:\n")
1248 for _, dep := range rs.Dependencies {
1249 buf.WriteString(fmt.Sprintf(" %s\n", dep))
1250 }
1251 }
1252 }
1253
1254 if len(m.Outputs) > 0 {
1255 buf.WriteString("\nOutputs:\n\n")
1256
1257 ks := make([]string, 0, len(m.Outputs))
1258 for k := range m.Outputs {
1259 ks = append(ks, k)
1260 }
1261
1262 sort.Strings(ks)
1263
1264 for _, k := range ks {
1265 v := m.Outputs[k]
1266 switch vTyped := v.Value.(type) {
1267 case string:
1268 buf.WriteString(fmt.Sprintf("%s = %s\n", k, vTyped))
1269 case []interface{}:
1270 buf.WriteString(fmt.Sprintf("%s = %s\n", k, vTyped))
1271 case map[string]interface{}:
1272 var mapKeys []string
1273 for key := range vTyped {
1274 mapKeys = append(mapKeys, key)
1275 }
1276 sort.Strings(mapKeys)
1277
1278 var mapBuf bytes.Buffer
1279 mapBuf.WriteString("{")
1280 for _, key := range mapKeys {
1281 mapBuf.WriteString(fmt.Sprintf("%s:%s ", key, vTyped[key]))
1282 }
1283 mapBuf.WriteString("}")
1284
1285 buf.WriteString(fmt.Sprintf("%s = %s\n", k, mapBuf.String()))
1286 }
1287 }
1288 }
1289
1290 return buf.String()
1291}
1292
1293// ResourceStateKey is a structured representation of the key used for the
1294// ModuleState.Resources mapping
1295type ResourceStateKey struct {
1296 Name string
1297 Type string
1298 Mode config.ResourceMode
1299 Index int
1300}
1301
1302// Equal determines whether two ResourceStateKeys are the same
1303func (rsk *ResourceStateKey) Equal(other *ResourceStateKey) bool {
1304 if rsk == nil || other == nil {
1305 return false
1306 }
1307 if rsk.Mode != other.Mode {
1308 return false
1309 }
1310 if rsk.Type != other.Type {
1311 return false
1312 }
1313 if rsk.Name != other.Name {
1314 return false
1315 }
1316 if rsk.Index != other.Index {
1317 return false
1318 }
1319 return true
1320}
1321
1322func (rsk *ResourceStateKey) String() string {
1323 if rsk == nil {
1324 return ""
1325 }
1326 var prefix string
1327 switch rsk.Mode {
1328 case config.ManagedResourceMode:
1329 prefix = ""
1330 case config.DataResourceMode:
1331 prefix = "data."
1332 default:
1333 panic(fmt.Errorf("unknown resource mode %s", rsk.Mode))
1334 }
1335 if rsk.Index == -1 {
1336 return fmt.Sprintf("%s%s.%s", prefix, rsk.Type, rsk.Name)
1337 }
1338 return fmt.Sprintf("%s%s.%s.%d", prefix, rsk.Type, rsk.Name, rsk.Index)
1339}
1340
1341// ParseResourceStateKey accepts a key in the format used by
1342 // ModuleState.Resources and returns a structured ResourceStateKey. In the
1343// state, a resource has the format "type.name.index" or "type.name". In the
1344// latter case, the index is returned as -1.
1345func ParseResourceStateKey(k string) (*ResourceStateKey, error) {
1346 parts := strings.Split(k, ".")
1347 mode := config.ManagedResourceMode
1348 if len(parts) > 0 && parts[0] == "data" {
1349 mode = config.DataResourceMode
1350 // Don't need the constant "data" prefix for parsing
1351 // now that we've figured out the mode.
1352 parts = parts[1:]
1353 }
1354 if len(parts) < 2 || len(parts) > 3 {
1355 return nil, fmt.Errorf("Malformed resource state key: %s", k)
1356 }
1357 rsk := &ResourceStateKey{
1358 Mode: mode,
1359 Type: parts[0],
1360 Name: parts[1],
1361 Index: -1,
1362 }
1363 if len(parts) == 3 {
1364 index, err := strconv.Atoi(parts[2])
1365 if err != nil {
1366 return nil, fmt.Errorf("Malformed resource state key index: %s", k)
1367 }
1368 rsk.Index = index
1369 }
1370 return rsk, nil
1371}
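
// Illustrative usage (an editor's sketch, not part of the upstream
// source; the keys are hypothetical):
//
//	rsk, _ := ParseResourceStateKey("aws_instance.web.3")
//	// rsk.Mode == config.ManagedResourceMode, rsk.Type == "aws_instance",
//	// rsk.Name == "web", rsk.Index == 3
//
//	rsk, _ = ParseResourceStateKey("data.aws_ami.ubuntu")
//	// rsk.Mode == config.DataResourceMode, rsk.Index == -1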
1372
1373// ResourceState holds the state of a resource that is used so that
1374// a provider can find and manage an existing resource as well as for
1375// storing attributes that are used to populate variables of child
1376// resources.
1377//
1378// Attributes has attributes about the created resource that are
1379 // queryable in interpolation: "${type.name.attribute}"
1380//
1381// Extra is just extra data that a provider can return that we store
1382// for later, but is not exposed in any way to the user.
1383//
1384type ResourceState struct {
1385 // This is filled in and managed by Terraform, and is the resource
1386 // type itself such as "mycloud_instance". If a resource provider sets
1387 // this value, it won't be persisted.
1388 Type string `json:"type"`
1389
1390 // Dependencies are a list of things that this resource relies on
1391 // existing to remain intact. For example: an AWS instance might
1392 // depend on a subnet (which itself might depend on a VPC, and so
1393 // on).
1394 //
1395 // Terraform uses this information to build valid destruction
1396 // orders and to warn the user if they're destroying a resource that
1397 // another resource depends on.
1398 //
1399 // Things can be put into this list that may not be managed by
1400 // Terraform. If Terraform doesn't find a matching ID in the
1401 // overall state, then it assumes it isn't managed and doesn't
1402 // worry about it.
1403 Dependencies []string `json:"depends_on"`
1404
1405 // Primary is the current active instance for this resource.
1406 // It can be replaced but only after a successful creation.
1407 // This is the instance on which providers will act.
1408 Primary *InstanceState `json:"primary"`
1409
1410 // Deposed is used in the mechanics of CreateBeforeDestroy: the existing
1411 // Primary is Deposed to get it out of the way for the replacement Primary to
1412 // be created by Apply. If the replacement Primary creates successfully, the
1413 // Deposed instance is cleaned up.
1414 //
1415 // If there were problems creating the replacement Primary, the Deposed
1416 // instance and the (now tainted) replacement Primary will be swapped so the
1417 // tainted replacement will be cleaned up instead.
1418 //
1419 // An instance will remain in the Deposed list until it is successfully
1420 // destroyed and purged.
1421 Deposed []*InstanceState `json:"deposed"`
1422
1423 // Provider is used when a resource is connected to a provider with an alias.
1424 // If this string is empty, the resource is connected to the default provider,
1425 // e.g. "aws_instance" goes with the "aws" provider.
1426 // If the resource block contained a "provider" key, that value will be set here.
1427 Provider string `json:"provider"`
1428
1429 mu sync.Mutex
1430}
1431
1432func (s *ResourceState) Lock() { s.mu.Lock() }
1433func (s *ResourceState) Unlock() { s.mu.Unlock() }
1434
1435// Equal tests whether two ResourceStates are equal.
1436func (s *ResourceState) Equal(other *ResourceState) bool {
1437 s.Lock()
1438 defer s.Unlock()
1439
1440 if s.Type != other.Type {
1441 return false
1442 }
1443
1444 if s.Provider != other.Provider {
1445 return false
1446 }
1447
1448 // Dependencies must be equal
1449 sort.Strings(s.Dependencies)
1450 sort.Strings(other.Dependencies)
1451 if len(s.Dependencies) != len(other.Dependencies) {
1452 return false
1453 }
1454 for i, d := range s.Dependencies {
1455 if other.Dependencies[i] != d {
1456 return false
1457 }
1458 }
1459
1460 // States must be equal
1461 if !s.Primary.Equal(other.Primary) {
1462 return false
1463 }
1464
1465 return true
1466}
1467
1468// Taint marks a resource as tainted.
1469func (s *ResourceState) Taint() {
1470 s.Lock()
1471 defer s.Unlock()
1472
1473 if s.Primary != nil {
1474 s.Primary.Tainted = true
1475 }
1476}
1477
1478// Untaint unmarks a resource as tainted.
1479func (s *ResourceState) Untaint() {
1480 s.Lock()
1481 defer s.Unlock()
1482
1483 if s.Primary != nil {
1484 s.Primary.Tainted = false
1485 }
1486}
1487
1488func (s *ResourceState) init() {
1489 s.Lock()
1490 defer s.Unlock()
1491
1492 if s.Primary == nil {
1493 s.Primary = &InstanceState{}
1494 }
1495 s.Primary.init()
1496
1497 if s.Dependencies == nil {
1498 s.Dependencies = []string{}
1499 }
1500
1501 if s.Deposed == nil {
1502 s.Deposed = make([]*InstanceState, 0)
1503 }
1504}
1505
1506func (s *ResourceState) deepcopy() *ResourceState {
1507 stateCopy, err := copystructure.Config{Lock: true}.Copy(s)
1508 if err != nil {
1509 panic(err)
1510 }
1511
1512 return stateCopy.(*ResourceState)
1513}
1514
1515// prune is used to remove any instances that are no longer required
1516func (s *ResourceState) prune() {
1517 s.Lock()
1518 defer s.Unlock()
1519
1520 n := len(s.Deposed)
1521 for i := 0; i < n; i++ {
1522 inst := s.Deposed[i]
1523 if inst == nil || inst.ID == "" {
1524 copy(s.Deposed[i:], s.Deposed[i+1:])
1525 s.Deposed[n-1] = nil
1526 n--
1527 i--
1528 }
1529 }
1530 s.Deposed = s.Deposed[:n]
1531
1532 s.Dependencies = uniqueStrings(s.Dependencies)
1533}
1534
1535func (s *ResourceState) sort() {
1536 s.Lock()
1537 defer s.Unlock()
1538
1539 sort.Strings(s.Dependencies)
1540}
1541
1542func (s *ResourceState) String() string {
1543 s.Lock()
1544 defer s.Unlock()
1545
1546 var buf bytes.Buffer
1547 buf.WriteString(fmt.Sprintf("Type = %s", s.Type))
1548 return buf.String()
1549}
1550
1551// InstanceState is used to track the unique state information belonging
1552// to a given instance.
1553type InstanceState struct {
1554 // A unique ID for this resource. This is opaque to Terraform
1555 // and is only meant as a lookup mechanism for the providers.
1556 ID string `json:"id"`
1557
1558 // Attributes are basic information about the resource. Any keys here
1559 // are accessible in variable format within Terraform configurations:
1560 // ${resourcetype.name.attribute}.
1561 Attributes map[string]string `json:"attributes"`
1562
1563 // Ephemeral is used to store any state associated with this instance
1564 // that is necessary for the Terraform run to complete, but is not
1565 // persisted to a state file.
1566 Ephemeral EphemeralState `json:"-"`
1567
1568 // Meta is a simple K/V map that is persisted to the State but otherwise
1569 // ignored by Terraform core. It's meant to be used for accounting by
1570 // external client code. The value here must only contain Go primitives
1571 // and collections.
1572 Meta map[string]interface{} `json:"meta"`
1573
1574 // Tainted is used to mark a resource for recreation.
1575 Tainted bool `json:"tainted"`
1576
1577 mu sync.Mutex
1578}
1579
1580func (s *InstanceState) Lock() { s.mu.Lock() }
1581func (s *InstanceState) Unlock() { s.mu.Unlock() }
1582
1583func (s *InstanceState) init() {
1584 s.Lock()
1585 defer s.Unlock()
1586
1587 if s.Attributes == nil {
1588 s.Attributes = make(map[string]string)
1589 }
1590 if s.Meta == nil {
1591 s.Meta = make(map[string]interface{})
1592 }
1593 s.Ephemeral.init()
1594}
1595
1596 // Set copies all the fields from another InstanceState
1597func (s *InstanceState) Set(from *InstanceState) {
1598 s.Lock()
1599 defer s.Unlock()
1600
1601 from.Lock()
1602 defer from.Unlock()
1603
1604 s.ID = from.ID
1605 s.Attributes = from.Attributes
1606 s.Ephemeral = from.Ephemeral
1607 s.Meta = from.Meta
1608 s.Tainted = from.Tainted
1609}
1610
1611func (s *InstanceState) DeepCopy() *InstanceState {
1612 stateCopy, err := copystructure.Config{Lock: true}.Copy(s)
1613 if err != nil {
1614 panic(err)
1615 }
1616
1617 return stateCopy.(*InstanceState)
1618}
1619
1620func (s *InstanceState) Empty() bool {
1621 if s == nil {
1622 return true
1623 }
1624 s.Lock()
1625 defer s.Unlock()
1626
1627 return s.ID == ""
1628}
1629
1630func (s *InstanceState) Equal(other *InstanceState) bool {
1631 // Short circuit some nil checks
1632 if s == nil || other == nil {
1633 return s == other
1634 }
1635 s.Lock()
1636 defer s.Unlock()
1637
1638 // IDs must be equal
1639 if s.ID != other.ID {
1640 return false
1641 }
1642
1643 // Attributes must be equal
1644 if len(s.Attributes) != len(other.Attributes) {
1645 return false
1646 }
1647 for k, v := range s.Attributes {
1648 otherV, ok := other.Attributes[k]
1649 if !ok {
1650 return false
1651 }
1652
1653 if v != otherV {
1654 return false
1655 }
1656 }
1657
1658 // Meta must be equal
1659 if len(s.Meta) != len(other.Meta) {
1660 return false
1661 }
1662 if s.Meta != nil && other.Meta != nil {
1663 // We only do the deep check if both are non-nil. If one is nil
1664 // we treat it as equal since their lengths are both zero (check
1665 // above).
1666 if !reflect.DeepEqual(s.Meta, other.Meta) {
1667 return false
1668 }
1669 }
1670
1671 if s.Tainted != other.Tainted {
1672 return false
1673 }
1674
1675 return true
1676}
1677
1678// MergeDiff takes a ResourceDiff and merges the attributes into
1679// this resource state in order to generate a new state. This new
1680// state can be used to provide updated attribute lookups for
1681// variable interpolation.
1682//
1683// If the diff attribute requires computing the value, and hence
1684// won't be available until apply, the value is replaced with the
1685 // unknown-value placeholder (config.UnknownVariableValue).
1686func (s *InstanceState) MergeDiff(d *InstanceDiff) *InstanceState {
1687 result := s.DeepCopy()
1688 if result == nil {
1689 result = new(InstanceState)
1690 }
1691 result.init()
1692
1693 if s != nil {
1694 s.Lock()
1695 defer s.Unlock()
1696 for k, v := range s.Attributes {
1697 result.Attributes[k] = v
1698 }
1699 }
1700 if d != nil {
1701 for k, diff := range d.CopyAttributes() {
1702 if diff.NewRemoved {
1703 delete(result.Attributes, k)
1704 continue
1705 }
1706 if diff.NewComputed {
1707 result.Attributes[k] = config.UnknownVariableValue
1708 continue
1709 }
1710
1711 result.Attributes[k] = diff.New
1712 }
1713 }
1714
1715 return result
1716}
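
// Illustrative usage (an editor's sketch, not part of the upstream
// source; the ID and attribute names are hypothetical):
//
//	is := &InstanceState{ID: "i-abc123", Attributes: map[string]string{
//		"ami": "ami-1", "tags.env": "dev",
//	}}
//	d := &InstanceDiff{Attributes: map[string]*ResourceAttrDiff{
//		"ami":      {New: "ami-2"},
//		"ip":       {NewComputed: true},
//		"tags.env": {NewRemoved: true},
//	}}
//	merged := is.MergeDiff(d)
//	// merged.Attributes: "ami" is "ami-2", "ip" is
//	// config.UnknownVariableValue, and "tags.env" is gone.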
1717
1718func (s *InstanceState) String() string {
1719 if s == nil || s.ID == "" {
1720 return "<not created>"
1721 }
1722
1723 s.Lock()
1724 defer s.Unlock()
1725
1726 var buf bytes.Buffer
1727
1728 buf.WriteString(fmt.Sprintf("ID = %s\n", s.ID))
1729
1730 attributes := s.Attributes
1731 attrKeys := make([]string, 0, len(attributes))
1732 for ak := range attributes {
1733 if ak == "id" {
1734 continue
1735 }
1736
1737 attrKeys = append(attrKeys, ak)
1738 }
1739 sort.Strings(attrKeys)
1740
1741 for _, ak := range attrKeys {
1742 av := attributes[ak]
1743 buf.WriteString(fmt.Sprintf("%s = %s\n", ak, av))
1744 }
1745
1746 buf.WriteString(fmt.Sprintf("Tainted = %t\n", s.Tainted))
1747
1748 return buf.String()
1749}
1750
1751 // EphemeralState is used for transient state that is only kept in memory
1752type EphemeralState struct {
1753 // ConnInfo is used for the providers to export information which is
1754 // used to connect to the resource for provisioning. For example,
1755 // this could contain SSH or WinRM credentials.
1756 ConnInfo map[string]string `json:"-"`
1757
1758 // Type is used to specify the resource type for this instance. This is only
1759 // required for import operations (as documented). If the documentation
1760 // doesn't state that you need to set this, then don't worry about
1761 // setting it.
1762 Type string `json:"-"`
1763}
1764
1765func (e *EphemeralState) init() {
1766 if e.ConnInfo == nil {
1767 e.ConnInfo = make(map[string]string)
1768 }
1769}
1770
1771func (e *EphemeralState) DeepCopy() *EphemeralState {
1772 stateCopy, err := copystructure.Config{Lock: true}.Copy(e)
1773 if err != nil {
1774 panic(err)
1775 }
1776
1777 return stateCopy.(*EphemeralState)
1778}
1779
1780type jsonStateVersionIdentifier struct {
1781 Version int `json:"version"`
1782}
1783
1784// Check if this is a V0 format - the magic bytes at the start of the file
1785// should be "tfstate" if so. We no longer support upgrading this type of
1786// state but return an error message explaining to a user how they can
1787// upgrade via the 0.6.x series.
1788func testForV0State(buf *bufio.Reader) error {
1789 start, err := buf.Peek(len("tfstate"))
1790 if err != nil {
1791 return fmt.Errorf("Failed to check for magic bytes: %v", err)
1792 }
1793 if string(start) == "tfstate" {
1794 return fmt.Errorf("Terraform 0.7 no longer supports upgrading the binary state\n" +
1795 "format which was used prior to Terraform 0.3. Please upgrade\n" +
1796 "this state file using Terraform 0.6.16 prior to using it with\n" +
1797 "Terraform 0.7.")
1798 }
1799
1800 return nil
1801}
1802
1803// ErrNoState is returned by ReadState when the io.Reader contains no data
1804var ErrNoState = errors.New("no state")
1805
1806// ReadState reads a state structure out of a reader in the format that
1807// was written by WriteState.
1808func ReadState(src io.Reader) (*State, error) {
1809 buf := bufio.NewReader(src)
1810 if _, err := buf.Peek(1); err != nil {
1811 // the error is either io.EOF or "invalid argument", and both are from
1812 // an empty state.
1813 return nil, ErrNoState
1814 }
1815
1816 if err := testForV0State(buf); err != nil {
1817 return nil, err
1818 }
1819
1820 // Buffer the whole JSON document in memory so we can read it twice.
1821 // This is suboptimal, but will work for now.
1822 jsonBytes, err := ioutil.ReadAll(buf)
1823 if err != nil {
1824 return nil, fmt.Errorf("Reading state file failed: %v", err)
1825 }
1826
1827 versionIdentifier := &jsonStateVersionIdentifier{}
1828 if err := json.Unmarshal(jsonBytes, versionIdentifier); err != nil {
1829 return nil, fmt.Errorf("Decoding state file version failed: %v", err)
1830 }
1831
1832 var result *State
1833 switch versionIdentifier.Version {
1834 case 0:
1835 return nil, fmt.Errorf("State version 0 is not supported as JSON.")
1836 case 1:
1837 v1State, err := ReadStateV1(jsonBytes)
1838 if err != nil {
1839 return nil, err
1840 }
1841
1842 v2State, err := upgradeStateV1ToV2(v1State)
1843 if err != nil {
1844 return nil, err
1845 }
1846
1847 v3State, err := upgradeStateV2ToV3(v2State)
1848 if err != nil {
1849 return nil, err
1850 }
1851
1852 // increment the Serial whenever we upgrade state
1853 v3State.Serial++
1854 result = v3State
1855 case 2:
1856 v2State, err := ReadStateV2(jsonBytes)
1857 if err != nil {
1858 return nil, err
1859 }
1860 v3State, err := upgradeStateV2ToV3(v2State)
1861 if err != nil {
1862 return nil, err
1863 }
1864
1865 v3State.Serial++
1866 result = v3State
1867 case 3:
1868 v3State, err := ReadStateV3(jsonBytes)
1869 if err != nil {
1870 return nil, err
1871 }
1872
1873 result = v3State
1874 default:
1875 return nil, fmt.Errorf("Terraform %s does not support state version %d, please update.",
1876 SemVersion.String(), versionIdentifier.Version)
1877 }
1878
1879 // If we reached this point we must have a result set
1880 if result == nil {
1881 panic("resulting state in load not set, assertion failed")
1882 }
1883
1884 // Prune the state when we read it. It's possible to write unpruned states
1885 // or for a user to make a state unpruned (nil-ing a module state, for example).
1886 result.prune()
1887
1888 // Validate the state file is valid
1889 if err := result.Validate(); err != nil {
1890 return nil, err
1891 }
1892
1893 return result, nil
1894}
1895
1896func ReadStateV1(jsonBytes []byte) (*stateV1, error) {
1897 v1State := &stateV1{}
1898 if err := json.Unmarshal(jsonBytes, v1State); err != nil {
1899 return nil, fmt.Errorf("Decoding state file failed: %v", err)
1900 }
1901
1902 if v1State.Version != 1 {
1903 return nil, fmt.Errorf("Decoded state version did not match the decoder selection: "+
1904 "read %d, expected 1", v1State.Version)
1905 }
1906
1907 return v1State, nil
1908}
1909
1910func ReadStateV2(jsonBytes []byte) (*State, error) {
1911 state := &State{}
1912 if err := json.Unmarshal(jsonBytes, state); err != nil {
1913 return nil, fmt.Errorf("Decoding state file failed: %v", err)
1914 }
1915
1916 // Check the version; this ensures we don't read a future
1917 // version that we don't understand.
1918 if state.Version > StateVersion {
1919 return nil, fmt.Errorf("Terraform %s does not support state version %d, please update.",
1920 SemVersion.String(), state.Version)
1921 }
1922
1923 // Make sure the version is semantic
1924 if state.TFVersion != "" {
1925 if _, err := version.NewVersion(state.TFVersion); err != nil {
1926 return nil, fmt.Errorf(
1927 "State contains invalid version: %s\n\n"+
1928 "Terraform validates the version format prior to writing it. This\n"+
1929 "means that this is invalid of the state becoming corrupted through\n"+
1930 "some external means. Please manually modify the Terraform version\n"+
1931 "field to be a proper semantic version.",
1932 state.TFVersion)
1933 }
1934 }
1935
1936 // catch any uninitialized fields in the state
1937 state.init()
1938
1939 // Sort it
1940 state.sort()
1941
1942 return state, nil
1943}
1944
1945func ReadStateV3(jsonBytes []byte) (*State, error) {
1946 state := &State{}
1947 if err := json.Unmarshal(jsonBytes, state); err != nil {
1948 return nil, fmt.Errorf("Decoding state file failed: %v", err)
1949 }
1950
1952 // Check the version; this ensures we don't read a future
1953 // version that we don't understand.
1953 if state.Version > StateVersion {
1954 return nil, fmt.Errorf("Terraform %s does not support state version %d, please update.",
1955 SemVersion.String(), state.Version)
1956 }
1957
1958 // Make sure the version is semantic
1959 if state.TFVersion != "" {
1960 if _, err := version.NewVersion(state.TFVersion); err != nil {
1961 return nil, fmt.Errorf(
1962 "State contains invalid version: %s\n\n"+
1963 "Terraform validates the version format prior to writing it. This\n"+
1964 "means that this is invalid of the state becoming corrupted through\n"+
1965 "some external means. Please manually modify the Terraform version\n"+
1966 "field to be a proper semantic version.",
1967 state.TFVersion)
1968 }
1969 }
1970
1971 // catch any uninitialized fields in the state
1972 state.init()
1973
1974 // Sort it
1975 state.sort()
1976
1977 // Now we write the state back out to detect any changes in normalization.
1978 // If our state is now written out differently, bump the serial number to
1979 // prevent conflicts.
1980 var buf bytes.Buffer
1981 err := WriteState(state, &buf)
1982 if err != nil {
1983 return nil, err
1984 }
1985
1986 if !bytes.Equal(jsonBytes, buf.Bytes()) {
1987 log.Println("[INFO] state modified during read or write. incrementing serial number")
1988 state.Serial++
1989 }
1990
1991 return state, nil
1992}
1993
1994// WriteState writes a state to the given writer in JSON format.
1995func WriteState(d *State, dst io.Writer) error {
1996 // writing a nil state is a noop.
1997 if d == nil {
1998 return nil
1999 }
2000
2001 // make sure we have no uninitialized fields
2002 d.init()
2003
2004 // Make sure it is sorted
2005 d.sort()
2006
2007 // Ensure the version is set
2008 d.Version = StateVersion
2009
2010 // If the TFVersion is set, verify it. We used to just set the version
2011 // here, but this isn't safe since it changes the MD5 sum on some remote
2012 // state storage backends such as Atlas. We now leave it as-is and only verify it.
2013 if d.TFVersion != "" {
2014 if _, err := version.NewVersion(d.TFVersion); err != nil {
2015 return fmt.Errorf(
2016 "Error writing state, invalid version: %s\n\n"+
2017 "The Terraform version when writing the state must be a semantic\n"+
2018 "version.",
2019 d.TFVersion)
2020 }
2021 }
2022
2023 // Encode the data in a human-friendly way
2024 data, err := json.MarshalIndent(d, "", " ")
2025 if err != nil {
2026 return fmt.Errorf("Failed to encode state: %s", err)
2027 }
2028
2029 // We append a newline to the data because MarshalIndent doesn't add one
2030 data = append(data, '\n')
2031
2032 // Write the data out to the dst
2033 if _, err := io.Copy(dst, bytes.NewReader(data)); err != nil {
2034 return fmt.Errorf("Failed to write state: %v", err)
2035 }
2036
2037 return nil
2038}
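
// Illustrative usage (an editor's sketch, not part of the upstream
// source). WriteState and ReadState round-trip through JSON:
//
//	var buf bytes.Buffer
//	if err := WriteState(state, &buf); err != nil {
//		// handle the error
//	}
//	restored, err := ReadState(&buf)
//	// restored is initialized, pruned, validated, and sorted; an
//	// empty reader yields ErrNoState instead.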
2039
2040// resourceNameSort implements sort.Interface to sort resource names, comparing
2041// name parts lexically for strings and numerically for integer indexes.
2042type resourceNameSort []string
2043
2044func (r resourceNameSort) Len() int { return len(r) }
2045func (r resourceNameSort) Swap(i, j int) { r[i], r[j] = r[j], r[i] }
2046
2047func (r resourceNameSort) Less(i, j int) bool {
2048 iParts := strings.Split(r[i], ".")
2049 jParts := strings.Split(r[j], ".")
2050
2051 end := len(iParts)
2052 if len(jParts) < end {
2053 end = len(jParts)
2054 }
2055
2056 for idx := 0; idx < end; idx++ {
2057 if iParts[idx] == jParts[idx] {
2058 continue
2059 }
2060
2061 // sort on the first non-matching part
2062 iInt, iIntErr := strconv.Atoi(iParts[idx])
2063 jInt, jIntErr := strconv.Atoi(jParts[idx])
2064
2065 switch {
2066 case iIntErr == nil && jIntErr == nil:
2067 // sort numerically if both parts are integers
2068 return iInt < jInt
2069 case iIntErr == nil:
2070 // numbers sort before strings
2071 return true
2072 case jIntErr == nil:
2073 return false
2074 default:
2075 return iParts[idx] < jParts[idx]
2076 }
2077 }
2078
2079 return r[i] < r[j]
2080}
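
// Illustrative usage (an editor's sketch, not part of the upstream
// source). Integer parts compare numerically rather than lexically:
//
//	names := []string{"aws_instance.web.10", "aws_instance.web.2"}
//	sort.Sort(resourceNameSort(names))
//	// names == []string{"aws_instance.web.2", "aws_instance.web.10"}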
2081
2082// moduleStateSort implements sort.Interface to sort module states
2083type moduleStateSort []*ModuleState
2084
2085func (s moduleStateSort) Len() int {
2086 return len(s)
2087}
2088
2089func (s moduleStateSort) Less(i, j int) bool {
2090 a := s[i]
2091 b := s[j]
2092
2093 // If either is nil, then the nil one is "less" than the other
2094 if a == nil || b == nil {
2095 return a == nil
2096 }
2097
2098 // If the lengths are different, then the shorter one always wins
2099 if len(a.Path) != len(b.Path) {
2100 return len(a.Path) < len(b.Path)
2101 }
2102
2103 // Otherwise, compare lexically
2104 return strings.Join(a.Path, ".") < strings.Join(b.Path, ".")
2105}
2106
2107func (s moduleStateSort) Swap(i, j int) {
2108 s[i], s[j] = s[j], s[i]
2109}
2110
2111const stateValidateErrMultiModule = `
2112Multiple modules with the same path: %s
2113
2114This means that there are multiple entries in the "modules" field
2115in your state file that point to the same module. This will cause Terraform
2116to behave in unexpected and error-prone ways and is invalid. Please back up
2117and modify your state file manually to resolve this.
2118`
diff --git a/vendor/github.com/hashicorp/terraform/terraform/state_add.go b/vendor/github.com/hashicorp/terraform/terraform/state_add.go
new file mode 100644
index 0000000..1163730
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/state_add.go
@@ -0,0 +1,374 @@
1package terraform
2
3import "fmt"
4
5// Add adds the item in the state at the given address.
6//
7// The item can be a ModuleState, ResourceState, or InstanceState. Depending
8// on the item type, the address may or may not be valid. For example, a
9// module cannot be moved to a resource address; however, a resource can be
10// moved to a module address (it retains the same name, under that module).
11//
12// The item can also be a []*ModuleState, which is the case for nested
13// modules. In this case, Add will expect the zero-index to be the top-most
14// module to add and will only nest children from there. For semantics, this
15// is equivalent to module => module.
16//
17// The full semantics of Add:
18//
19// ┌───────────────────┬───────────────────┬───────────────────┐
20// │ Module Address │ Resource Address │ Instance Address │
21// ┌─────────────────┼───────────────────┼───────────────────┼───────────────────┤
22// │ ModuleState │ ✓ │ x │ x │
23// ├─────────────────┼───────────────────┼───────────────────┼───────────────────┤
24// │ ResourceState │ ✓ │ ✓ │ maybe* │
25// ├─────────────────┼───────────────────┼───────────────────┼───────────────────┤
26// │ Instance State │ ✓ │ ✓ │ ✓ │
27// └─────────────────┴───────────────────┴───────────────────┴───────────────────┘
28//
29// *maybe - Resources can be added at an instance address only if the resource
30// represents a single instance (primary). Example:
31// "aws_instance.foo" can be moved to "aws_instance.bar.tainted"
32//
33func (s *State) Add(fromAddrRaw string, toAddrRaw string, raw interface{}) error {
34 // Parse the to address
35
36 toAddr, err := ParseResourceAddress(toAddrRaw)
37 if err != nil {
38 return err
39 }
40
41 // Parse the from address
42 fromAddr, err := ParseResourceAddress(fromAddrRaw)
43 if err != nil {
44 return err
45 }
46
47 // Determine the types
48 from := detectValueAddLoc(raw)
49 to := detectAddrAddLoc(toAddr)
50
51 // Find the function to do this
52 fromMap, ok := stateAddFuncs[from]
53 if !ok {
54 return fmt.Errorf("invalid source to add to state: %T", raw)
55 }
56 f, ok := fromMap[to]
57 if !ok {
58 return fmt.Errorf("invalid destination: %s (%d)", toAddr, to)
59 }
60
61 // Call the migrator
62 if err := f(s, fromAddr, toAddr, raw); err != nil {
63 return err
64 }
65
66 // Prune the state
67 s.prune()
68 return nil
69}
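
// Illustrative usage (an editor's sketch, not part of the upstream
// source; the addresses are hypothetical). Moving a resource under a
// module address, per the matrix above:
//
//	rs := mod.Resources["aws_instance.web"]
//	err := s.Add("aws_instance.web", "module.frontend.aws_instance.web", rs)
//
// Add only copies the item into place; removing the original entry is
// the caller's responsibility.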
70
71func stateAddFunc_Module_Module(s *State, fromAddr, addr *ResourceAddress, raw interface{}) error {
72 // raw can be either *ModuleState or []*ModuleState. The former means
73 // we're moving just one module. The latter means we're moving a module
74 // and children.
75 root := raw
76 var rest []*ModuleState
77 if list, ok := raw.([]*ModuleState); ok {
78 // We need at least one item
79 if len(list) == 0 {
80 return fmt.Errorf("module move with no value to: %s", addr)
81 }
82
83 // The first item is always the root
84 root = list[0]
85 if len(list) > 1 {
86 rest = list[1:]
87 }
88 }
89
90 // Get the actual module state
91 src := root.(*ModuleState).deepcopy()
92
93 // If the target module exists, it is an error
94 path := append([]string{"root"}, addr.Path...)
95 if s.ModuleByPath(path) != nil {
96 return fmt.Errorf("module target is not empty: %s", addr)
97 }
98
99 // Create it and copy our outputs and dependencies
100 mod := s.AddModule(path)
101 mod.Outputs = src.Outputs
102 mod.Dependencies = src.Dependencies
103
104 // Go through the resources and perform an add for each of them
105 for k, v := range src.Resources {
106 resourceKey, err := ParseResourceStateKey(k)
107 if err != nil {
108 return err
109 }
110
111 // Update the resource address for this
112 addrCopy := *addr
113 addrCopy.Type = resourceKey.Type
114 addrCopy.Name = resourceKey.Name
115 addrCopy.Index = resourceKey.Index
116 addrCopy.Mode = resourceKey.Mode
117
118 // Perform an add
119 if err := s.Add(fromAddr.String(), addrCopy.String(), v); err != nil {
120 return err
121 }
122 }
123
124 // Add all the children if we have them
125 for _, item := range rest {
126 // If item isn't a descendent of our root, then ignore it
127 if !src.IsDescendent(item) {
128 continue
129 }
130
131 // It is! Strip the leading prefix and attach that to our address
132 extra := item.Path[len(src.Path):]
133 addrCopy := addr.Copy()
134 addrCopy.Path = append(addrCopy.Path, extra...)
135
136 // Add it
137 if err := s.Add(fromAddr.String(), addrCopy.String(), item); err != nil {
138 return err
139 }
138 }
139
140 return nil
141}
142
143func stateAddFunc_Resource_Module(
144 s *State, from, to *ResourceAddress, raw interface{}) error {
145 // Build the more specific to addr
146 addr := *to
147 addr.Type = from.Type
148 addr.Name = from.Name
149
150 return s.Add(from.String(), addr.String(), raw)
151}
152
153func stateAddFunc_Resource_Resource(s *State, fromAddr, addr *ResourceAddress, raw interface{}) error {
154 // raw can be either *ResourceState or []*ResourceState. The former means
155 // we're moving just one resource. The latter means we're moving a count
156 // of resources.
157 if list, ok := raw.([]*ResourceState); ok {
158 // We need at least one item
159 if len(list) == 0 {
160 return fmt.Errorf("resource move with no value to: %s", addr)
161 }
162
163 // If there is an index, this is an error since we can't assign
164 // a set of resources to a single index
165 if addr.Index >= 0 && len(list) > 1 {
166 return fmt.Errorf(
167 "multiple resources can't be moved to a single index: "+
168 "%s => %s", fromAddr, addr)
169 }
170
171 // Add each with a specific index
172 for i, rs := range list {
173 addrCopy := addr.Copy()
174 addrCopy.Index = i
175
176 if err := s.Add(fromAddr.String(), addrCopy.String(), rs); err != nil {
177 return err
178 }
179 }
180
181 return nil
182 }
183
184 src := raw.(*ResourceState).deepcopy()
185
186 // Initialize the resource
187 resourceRaw, exists := stateAddInitAddr(s, addr)
188 if exists {
189 return fmt.Errorf("resource exists and not empty: %s", addr)
190 }
191 resource := resourceRaw.(*ResourceState)
192 resource.Type = src.Type
193 resource.Dependencies = src.Dependencies
194 resource.Provider = src.Provider
195
196 // Move the primary
197 if src.Primary != nil {
198 addrCopy := *addr
199 addrCopy.InstanceType = TypePrimary
200 addrCopy.InstanceTypeSet = true
201 if err := s.Add(fromAddr.String(), addrCopy.String(), src.Primary); err != nil {
202 return err
203 }
204 }
205
206 // Move all deposed
207 if len(src.Deposed) > 0 {
208 resource.Deposed = src.Deposed
209 }
210
211 return nil
212}
213
214func stateAddFunc_Instance_Instance(s *State, fromAddr, addr *ResourceAddress, raw interface{}) error {
215 src := raw.(*InstanceState).DeepCopy()
216
217 // Create the instance
218 instanceRaw, _ := stateAddInitAddr(s, addr)
219 instance := instanceRaw.(*InstanceState)
220
221 // Set it
222 instance.Set(src)
223
224 return nil
225}
226
227func stateAddFunc_Instance_Module(
228 s *State, from, to *ResourceAddress, raw interface{}) error {
229 addr := *to
230 addr.Type = from.Type
231 addr.Name = from.Name
232
233 return s.Add(from.String(), addr.String(), raw)
234}
235
236func stateAddFunc_Instance_Resource(
237 s *State, from, to *ResourceAddress, raw interface{}) error {
238 addr := *to
239 addr.InstanceType = TypePrimary
240 addr.InstanceTypeSet = true
241
242 return s.Add(from.String(), addr.String(), raw)
243}
244
245// stateAddFunc is the type of function for adding an item to a state
246type stateAddFunc func(s *State, from, to *ResourceAddress, item interface{}) error
247
248// stateAddFuncs has the full matrix mapping of the state adders.
249var stateAddFuncs map[stateAddLoc]map[stateAddLoc]stateAddFunc
250
251func init() {
252 stateAddFuncs = map[stateAddLoc]map[stateAddLoc]stateAddFunc{
253 stateAddModule: {
254 stateAddModule: stateAddFunc_Module_Module,
255 },
256 stateAddResource: {
257 stateAddModule: stateAddFunc_Resource_Module,
258 stateAddResource: stateAddFunc_Resource_Resource,
259 },
260 stateAddInstance: {
261 stateAddInstance: stateAddFunc_Instance_Instance,
262 stateAddModule: stateAddFunc_Instance_Module,
263 stateAddResource: stateAddFunc_Instance_Resource,
264 },
265 }
266}
267
268// stateAddLoc is an enum to represent the location where state is being
269// moved from/to. We use this for quick lookups in a function map.
270type stateAddLoc uint
271
272const (
273 stateAddInvalid stateAddLoc = iota
274 stateAddModule
275 stateAddResource
276 stateAddInstance
277)
278
279// detectAddrAddLoc detects the state type for the given address. This
280// function is specifically not unit tested since we consider the State.Add
281// tests to be comprehensive enough to cover it.
282func detectAddrAddLoc(addr *ResourceAddress) stateAddLoc {
283 if addr.Name == "" {
284 return stateAddModule
285 }
286
287 if !addr.InstanceTypeSet {
288 return stateAddResource
289 }
290
291 return stateAddInstance
292}
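
// Illustrative mapping (an editor's sketch, not part of the upstream
// source; the addresses are hypothetical):
//
//	"module.foo"               -> stateAddModule   (no resource name)
//	"aws_instance.foo"         -> stateAddResource (no instance type set)
//	"aws_instance.foo.primary" -> stateAddInstance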
293
294// detectValueAddLoc determines the stateAddLoc value from the raw value
295// that is some State structure.
296func detectValueAddLoc(raw interface{}) stateAddLoc {
297 switch raw.(type) {
298 case *ModuleState:
299 return stateAddModule
300 case []*ModuleState:
301 return stateAddModule
302 case *ResourceState:
303 return stateAddResource
304 case []*ResourceState:
305 return stateAddResource
306 case *InstanceState:
307 return stateAddInstance
308 default:
309 return stateAddInvalid
310 }
311}
312
313// stateAddInitAddr takes a ResourceAddress and creates the non-existing
314// resources up to that point, returning the empty (or existing) interface
315// at that address.
316func stateAddInitAddr(s *State, addr *ResourceAddress) (interface{}, bool) {
317 addType := detectAddrAddLoc(addr)
318
319 // Get the module
320 path := append([]string{"root"}, addr.Path...)
321 exists := true
322 mod := s.ModuleByPath(path)
323 if mod == nil {
324 mod = s.AddModule(path)
325 exists = false
326 }
327 if addType == stateAddModule {
328 return mod, exists
329 }
330
331 // Add the resource
332 resourceKey := (&ResourceStateKey{
333 Name: addr.Name,
334 Type: addr.Type,
335 Index: addr.Index,
336 Mode: addr.Mode,
337 }).String()
338 exists = true
339 resource, ok := mod.Resources[resourceKey]
340 if !ok {
341 resource = &ResourceState{Type: addr.Type}
342 resource.init()
343 mod.Resources[resourceKey] = resource
344 exists = false
345 }
346 if addType == stateAddResource {
347 return resource, exists
348 }
349
350 // Get the instance
351 exists = true
352 instance := &InstanceState{}
353 switch addr.InstanceType {
354 case TypePrimary, TypeTainted:
355 if v := resource.Primary; v != nil {
356 instance = v
357 } else {
358 exists = false
359 }
360 case TypeDeposed:
361 idx := addr.Index
362 if addr.Index < 0 {
363 idx = 0
364 }
365 if len(resource.Deposed) > idx {
366 instance = resource.Deposed[idx]
367 } else {
368 resource.Deposed = append(resource.Deposed, instance)
369 exists = false
370 }
371 }
372
373 return instance, exists
374}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/state_filter.go b/vendor/github.com/hashicorp/terraform/terraform/state_filter.go
new file mode 100644
index 0000000..2dcb11b
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/state_filter.go
@@ -0,0 +1,267 @@
1package terraform
2
3import (
4 "fmt"
5 "sort"
6)
7
8// StateFilter is responsible for filtering and searching a state.
9//
10// This is a separate struct from State rather than a method on State
11// because StateFilter might create sidecar data structures to optimize
12// filtering on the state.
13//
14// If you change the State, the filter created is invalid and either
15// Reset should be called or a new one should be allocated. StateFilter
16// will not watch State for changes and do this for you. If you filter after
17// changing the State without calling Reset, the behavior is not defined.
18type StateFilter struct {
19 State *State
20}
21
22// Filter takes the addresses specified by fs and finds all the matches.
23// The values of fs are resource addressing syntax that can be parsed by
24// ParseResourceAddress.
25func (f *StateFilter) Filter(fs ...string) ([]*StateFilterResult, error) {
26 // Parse all the addresses
27 as := make([]*ResourceAddress, len(fs))
28 for i, v := range fs {
29 a, err := ParseResourceAddress(v)
30 if err != nil {
31 return nil, fmt.Errorf("Error parsing address '%s': %s", v, err)
32 }
33
34 as[i] = a
35 }
36
37 // If we weren't given any filters, then we list all
38 if len(fs) == 0 {
39 as = append(as, &ResourceAddress{Index: -1})
40 }
41
42 // Filter each of the addresses. We keep track of the results in a map to
43 // strip duplicates.
44 resultSet := make(map[string]*StateFilterResult)
45 for _, a := range as {
46 for _, r := range f.filterSingle(a) {
47 resultSet[r.String()] = r
48 }
49 }
50
51 // Make the result list
52 results := make([]*StateFilterResult, 0, len(resultSet))
53 for _, v := range resultSet {
54 results = append(results, v)
55 }
56
57 // Sort them and return
58 sort.Sort(StateFilterResultSlice(results))
59 return results, nil
60}
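
// Illustrative usage (an editor's sketch, not part of the upstream
// source; the addresses are hypothetical):
//
//	filter := &StateFilter{State: s}
//	results, err := filter.Filter("module.frontend", "aws_instance.web")
//	// results contains matching modules, resources, and their
//	// instances, sorted; with no arguments every item matches.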
61
62func (f *StateFilter) filterSingle(a *ResourceAddress) []*StateFilterResult {
63 // The slice to keep track of results
64 var results []*StateFilterResult
65
66 // Go through modules first.
67 modules := make([]*ModuleState, 0, len(f.State.Modules))
68 for _, m := range f.State.Modules {
69 if f.relevant(a, m) {
70 modules = append(modules, m)
71
72 // Only add the module to the results if we haven't specified a type.
73 // We also ignore the root module.
74 if a.Type == "" && len(m.Path) > 1 {
75 results = append(results, &StateFilterResult{
76 Path: m.Path[1:],
77 Address: (&ResourceAddress{Path: m.Path[1:]}).String(),
78 Value: m,
79 })
80 }
81 }
82 }
83
84 // With the modules set, go through all the resources within
85 // the modules to find relevant resources.
86 for _, m := range modules {
87 for n, r := range m.Resources {
88 // The name in the state contains valuable information. Parse.
89 key, err := ParseResourceStateKey(n)
90 if err != nil {
91 // If we get an error parsing, then just ignore it
92 // out of the state.
93 continue
94 }
95
96 // Older states and test fixtures often don't contain the
97 // type directly on the ResourceState. We add this so StateFilter
98 // is a bit more robust.
99 if r.Type == "" {
100 r.Type = key.Type
101 }
102
103 if f.relevant(a, r) {
104 if a.Name != "" && a.Name != key.Name {
105 // Name doesn't match
106 continue
107 }
108
109 if a.Index >= 0 && key.Index != a.Index {
110 // Index doesn't match
111 continue
112 }
113
114 if a.Name != "" && a.Name != key.Name {
115 continue
116 }
117
118 // Build the address for this resource
119 addr := &ResourceAddress{
120 Path: m.Path[1:],
121 Name: key.Name,
122 Type: key.Type,
123 Index: key.Index,
124 }
125
126 // Add the resource level result
127 resourceResult := &StateFilterResult{
128 Path: addr.Path,
129 Address: addr.String(),
130 Value: r,
131 }
132 if !a.InstanceTypeSet {
133 results = append(results, resourceResult)
134 }
135
136 // Add the instances
137 if r.Primary != nil {
138 addr.InstanceType = TypePrimary
139 addr.InstanceTypeSet = false
140 results = append(results, &StateFilterResult{
141 Path: addr.Path,
142 Address: addr.String(),
143 Parent: resourceResult,
144 Value: r.Primary,
145 })
146 }
147
148 for _, instance := range r.Deposed {
149 if f.relevant(a, instance) {
150 addr.InstanceType = TypeDeposed
151 addr.InstanceTypeSet = true
152 results = append(results, &StateFilterResult{
153 Path: addr.Path,
154 Address: addr.String(),
155 Parent: resourceResult,
156 Value: instance,
157 })
158 }
159 }
160 }
161 }
162 }
163
164 return results
165}
166
167// relevant checks for relevance of this address against the given value.
168func (f *StateFilter) relevant(addr *ResourceAddress, raw interface{}) bool {
169 switch v := raw.(type) {
170 case *ModuleState:
171 path := v.Path[1:]
172
173 if len(addr.Path) > len(path) {
174 // Longer path in address means there is no way we match.
175 return false
176 }
177
178 // Check for a prefix match
179 for i, p := range addr.Path {
180 if path[i] != p {
181 // Any mismatches don't match.
182 return false
183 }
184 }
185
186 return true
187 case *ResourceState:
188 if addr.Type == "" {
189 // If we have no resource type, then we're interested in all!
190 return true
191 }
192
193 // If the type doesn't match we fail immediately
194 if v.Type != addr.Type {
195 return false
196 }
197
198 return true
199 default:
200 // If we don't know about it, let's just say no
201 return false
202 }
203}
204
205// StateFilterResult is a single result from a filter operation. Filter
206// can match multiple things within a state (module, resource, instance, etc.)
207// and this type unifies those results.
208type StateFilterResult struct {
209 // Module path of the result
210 Path []string
211
212 // Address is the address that can be used to reference this exact result.
213 Address string
214
215 // Parent, if non-nil, is a parent of this result. For instances, the
216 // parent would be a resource. For resources, the parent would be
217 // a module. For modules, this is currently nil.
218 Parent *StateFilterResult
219
220 // Value is the actual value. This must be type switched on. It can be
221 // any data structures that `State` can hold: `ModuleState`,
222 // `ResourceState`, `InstanceState`.
223 Value interface{}
224}
225
226func (r *StateFilterResult) String() string {
227 return fmt.Sprintf("%T: %s", r.Value, r.Address)
228}
229
230func (r *StateFilterResult) sortedType() int {
231 switch r.Value.(type) {
232 case *ModuleState:
233 return 0
234 case *ResourceState:
235 return 1
236 case *InstanceState:
237 return 2
238 default:
239 return 50
240 }
241}
242
243// StateFilterResultSlice is a slice of results that implements
244// sort.Interface. The sorting goal is what is most appealing to
245// human output.
246type StateFilterResultSlice []*StateFilterResult
247
248func (s StateFilterResultSlice) Len() int { return len(s) }
249func (s StateFilterResultSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
250func (s StateFilterResultSlice) Less(i, j int) bool {
251 a, b := s[i], s[j]
252
253 // If these addresses contain an index, we want to sort by index rather than name
254 addrA, errA := ParseResourceAddress(a.Address)
255 addrB, errB := ParseResourceAddress(b.Address)
256 if errA == nil && errB == nil && addrA.Name == addrB.Name && addrA.Index != addrB.Index {
257 return addrA.Index < addrB.Index
258 }
259
260 // If the addresses are different, fall back to lexicographic sorting
261 if a.Address != b.Address {
262 return a.Address < b.Address
263 }
264
265 // Addresses are the same, so order by the type of the value
266 return a.sortedType() < b.sortedType()
267}
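
For orientation, a minimal sketch of driving StateFilter from outside the package, assuming the vendored import path above; the state contents are hypothetical:

package main

import (
	"fmt"

	"github.com/hashicorp/terraform/terraform"
)

func main() {
	// A hypothetical state with a single root-module resource.
	state := &terraform.State{
		Version: 3,
		Modules: []*terraform.ModuleState{
			{
				Path: []string{"root"},
				Resources: map[string]*terraform.ResourceState{
					"aws_instance.web": {
						Type:    "aws_instance",
						Primary: &terraform.InstanceState{ID: "i-abc123"},
					},
				},
			},
		},
	}

	// Filtering by address yields both the resource and its primary
	// instance, ordered as described by StateFilterResultSlice.
	filter := &terraform.StateFilter{State: state}
	results, err := filter.Filter("aws_instance.web")
	if err != nil {
		panic(err)
	}
	for _, r := range results {
		fmt.Println(r) // e.g. "*terraform.ResourceState: aws_instance.web"
	}
}
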
diff --git a/vendor/github.com/hashicorp/terraform/terraform/state_upgrade_v1_to_v2.go b/vendor/github.com/hashicorp/terraform/terraform/state_upgrade_v1_to_v2.go
new file mode 100644
index 0000000..aa13cce
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/state_upgrade_v1_to_v2.go
@@ -0,0 +1,189 @@
1package terraform
2
3import (
4 "fmt"
5
6 "github.com/mitchellh/copystructure"
7)
8
9// upgradeStateV1ToV2 is used to upgrade a V1 state representation
10// into a V2 state representation
11func upgradeStateV1ToV2(old *stateV1) (*State, error) {
12 if old == nil {
13 return nil, nil
14 }
15
16 remote, err := old.Remote.upgradeToV2()
17 if err != nil {
18 return nil, fmt.Errorf("Error upgrading State V1: %v", err)
19 }
20
21 modules := make([]*ModuleState, len(old.Modules))
22 for i, module := range old.Modules {
23 upgraded, err := module.upgradeToV2()
24 if err != nil {
25 return nil, fmt.Errorf("Error upgrading State V1: %v", err)
26 }
27 modules[i] = upgraded
28 }
29 if len(modules) == 0 {
30 modules = nil
31 }
32
33 newState := &State{
34 Version: 2,
35 Serial: old.Serial,
36 Remote: remote,
37 Modules: modules,
38 }
39
40 newState.sort()
41 newState.init()
42
43 return newState, nil
44}
45
46func (old *remoteStateV1) upgradeToV2() (*RemoteState, error) {
47 if old == nil {
48 return nil, nil
49 }
50
51 config, err := copystructure.Copy(old.Config)
52 if err != nil {
53 return nil, fmt.Errorf("Error upgrading RemoteState V1: %v", err)
54 }
55
56 return &RemoteState{
57 Type: old.Type,
58 Config: config.(map[string]string),
59 }, nil
60}
61
62func (old *moduleStateV1) upgradeToV2() (*ModuleState, error) {
63 if old == nil {
64 return nil, nil
65 }
66
67 pathRaw, err := copystructure.Copy(old.Path)
68 if err != nil {
69 return nil, fmt.Errorf("Error upgrading ModuleState V1: %v", err)
70 }
71 path, ok := pathRaw.([]string)
72 if !ok {
73 return nil, fmt.Errorf("Error upgrading ModuleState V1: path is not a list of strings")
74 }
75 if len(path) == 0 {
76 // We found some V1 states with a nil path. Assume root and catch
77 // duplicate path errors later (as part of Validate).
78 path = rootModulePath
79 }
80
81 // Outputs needs upgrading to use the new structure
82 outputs := make(map[string]*OutputState)
83 for key, output := range old.Outputs {
84 outputs[key] = &OutputState{
85 Type: "string",
86 Value: output,
87 Sensitive: false,
88 }
89 }
90
91 resources := make(map[string]*ResourceState)
92 for key, oldResource := range old.Resources {
93 upgraded, err := oldResource.upgradeToV2()
94 if err != nil {
95 return nil, fmt.Errorf("Error upgrading ModuleState V1: %v", err)
96 }
97 resources[key] = upgraded
98 }
99
100 dependencies, err := copystructure.Copy(old.Dependencies)
101 if err != nil {
102 return nil, fmt.Errorf("Error upgrading ModuleState V1: %v", err)
103 }
104
105 return &ModuleState{
106 Path: path,
107 Outputs: outputs,
108 Resources: resources,
109 Dependencies: dependencies.([]string),
110 }, nil
111}
112
113func (old *resourceStateV1) upgradeToV2() (*ResourceState, error) {
114 if old == nil {
115 return nil, nil
116 }
117
118 dependencies, err := copystructure.Copy(old.Dependencies)
119 if err != nil {
120 return nil, fmt.Errorf("Error upgrading ResourceState V1: %v", err)
121 }
122
123 primary, err := old.Primary.upgradeToV2()
124 if err != nil {
125 return nil, fmt.Errorf("Error upgrading ResourceState V1: %v", err)
126 }
127
128 deposed := make([]*InstanceState, len(old.Deposed))
129 for i, v := range old.Deposed {
130 upgraded, err := v.upgradeToV2()
131 if err != nil {
132 return nil, fmt.Errorf("Error upgrading ResourceState V1: %v", err)
133 }
134 deposed[i] = upgraded
135 }
136 if len(deposed) == 0 {
137 deposed = nil
138 }
139
140 return &ResourceState{
141 Type: old.Type,
142 Dependencies: dependencies.([]string),
143 Primary: primary,
144 Deposed: deposed,
145 Provider: old.Provider,
146 }, nil
147}
148
149func (old *instanceStateV1) upgradeToV2() (*InstanceState, error) {
150 if old == nil {
151 return nil, nil
152 }
153
154 attributes, err := copystructure.Copy(old.Attributes)
155 if err != nil {
156 return nil, fmt.Errorf("Error upgrading InstanceState V1: %v", err)
157 }
158 ephemeral, err := old.Ephemeral.upgradeToV2()
159 if err != nil {
160 return nil, fmt.Errorf("Error upgrading InstanceState V1: %v", err)
161 }
162
163 meta, err := copystructure.Copy(old.Meta)
164 if err != nil {
165 return nil, fmt.Errorf("Error upgrading InstanceState V1: %v", err)
166 }
167
168 newMeta := make(map[string]interface{})
169 for k, v := range meta.(map[string]string) {
170 newMeta[k] = v
171 }
172
173 return &InstanceState{
174 ID: old.ID,
175 Attributes: attributes.(map[string]string),
176 Ephemeral: *ephemeral,
177 Meta: newMeta,
178 }, nil
179}
180
181func (old *ephemeralStateV1) upgradeToV2() (*EphemeralState, error) {
182 connInfo, err := copystructure.Copy(old.ConnInfo)
183 if err != nil {
184 return nil, fmt.Errorf("Error upgrading EphemeralState V1: %v", err)
185 }
186 return &EphemeralState{
187 ConnInfo: connInfo.(map[string]string),
188 }, nil
189}
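
The heart of the module upgrade above is mechanical: every plain-string V1 output is wrapped in a V2 OutputState with Type "string" and Sensitive false. A standalone sketch of just that step, with stand-in types rather than the package's API:

package main

import "fmt"

// outputV2 stands in for the package's OutputState.
type outputV2 struct {
	Type      string
	Value     interface{}
	Sensitive bool
}

// upgradeOutputs mirrors the loop in moduleStateV1.upgradeToV2: V1
// outputs were always strings and never sensitive, so both properties
// are hard-coded during the upgrade.
func upgradeOutputs(old map[string]string) map[string]*outputV2 {
	upgraded := make(map[string]*outputV2, len(old))
	for key, value := range old {
		upgraded[key] = &outputV2{
			Type:      "string",
			Value:     value,
			Sensitive: false,
		}
	}
	return upgraded
}

func main() {
	v1 := map[string]string{"instance_ip": "10.0.0.5"}
	for name, out := range upgradeOutputs(v1) {
		fmt.Printf("%s: %+v\n", name, *out)
	}
}
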
diff --git a/vendor/github.com/hashicorp/terraform/terraform/state_upgrade_v2_to_v3.go b/vendor/github.com/hashicorp/terraform/terraform/state_upgrade_v2_to_v3.go
new file mode 100644
index 0000000..e52d35f
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/state_upgrade_v2_to_v3.go
@@ -0,0 +1,142 @@
1package terraform
2
3import (
4 "fmt"
5 "log"
6 "regexp"
7 "sort"
8 "strconv"
9 "strings"
10)
11
12// The upgrade process from V2 to V3 state does not affect the structure,
13// so we do not need to redeclare all of the structs involved - we just
14// take a deep copy of the old structure and assert the version number is
15// as we expect.
16func upgradeStateV2ToV3(old *State) (*State, error) {
17 new := old.DeepCopy()
18
19 // Ensure the copied version is v2 before attempting to upgrade
20 if new.Version != 2 {
21 return nil, fmt.Errorf("Cannot apply v2->v3 state upgrade to " +
22 "a state which is not version 2.")
23 }
24
25 // Set the new version number
26 new.Version = 3
27
28 // Change the counts for things which look like maps to use the %
29 // syntax. Remove counts for empty collections - they will be added
30 // back in later.
31 for _, module := range new.Modules {
32 for _, resource := range module.Resources {
33 // Upgrade Primary
34 if resource.Primary != nil {
35 upgradeAttributesV2ToV3(resource.Primary)
36 }
37
38 // Upgrade Deposed
39 if resource.Deposed != nil {
40 for _, deposed := range resource.Deposed {
41 upgradeAttributesV2ToV3(deposed)
42 }
43 }
44 }
45 }
46
47 return new, nil
48}
49
50func upgradeAttributesV2ToV3(instanceState *InstanceState) error {
51 collectionKeyRegexp := regexp.MustCompile(`^(.*\.)#$`)
52 collectionSubkeyRegexp := regexp.MustCompile(`^([^\.]+)\..*`)
53
54 // Identify the key prefix of anything which is a collection
55 var collectionKeyPrefixes []string
56 for key := range instanceState.Attributes {
57 if submatches := collectionKeyRegexp.FindAllStringSubmatch(key, -1); len(submatches) > 0 {
58 collectionKeyPrefixes = append(collectionKeyPrefixes, submatches[0][1])
59 }
60 }
61 sort.Strings(collectionKeyPrefixes)
62
63 log.Printf("[STATE UPGRADE] Detected the following collections in state: %v", collectionKeyPrefixes)
64
65 // This could be rolled into fewer loops, but it is somewhat clearer this way, and will not
66 // run very often.
67 for _, prefix := range collectionKeyPrefixes {
68 // First get the actual keys that belong to this prefix
69 var potentialKeysMatching []string
70 for key := range instanceState.Attributes {
71 if strings.HasPrefix(key, prefix) {
72 potentialKeysMatching = append(potentialKeysMatching, strings.TrimPrefix(key, prefix))
73 }
74 }
75 sort.Strings(potentialKeysMatching)
76
77 var actualKeysMatching []string
78 for _, key := range potentialKeysMatching {
79 if submatches := collectionSubkeyRegexp.FindAllStringSubmatch(key, -1); len(submatches) > 0 {
80 actualKeysMatching = append(actualKeysMatching, submatches[0][1])
81 } else {
82 if key != "#" {
83 actualKeysMatching = append(actualKeysMatching, key)
84 }
85 }
86 }
87 actualKeysMatching = uniqueSortedStrings(actualKeysMatching)
88
89 // Now inspect the keys in order to determine whether this is most likely to be
90 // a map, list or set. There is room for error here, so we log in each case. If
91 // there is no method of telling, we remove the key from the InstanceState in
92 // order that it will be recreated. Again, this could be rolled into fewer loops
93 // but we prefer clarity.
94
95 oldCountKey := fmt.Sprintf("%s#", prefix)
96
97 // First, detect "obvious" maps - which have non-numeric keys (mostly).
98 hasNonNumericKeys := false
99 for _, key := range actualKeysMatching {
100 if _, err := strconv.Atoi(key); err != nil {
101 hasNonNumericKeys = true
102 }
103 }
104 if hasNonNumericKeys {
105 newCountKey := fmt.Sprintf("%s%%", prefix)
106
107 instanceState.Attributes[newCountKey] = instanceState.Attributes[oldCountKey]
108 delete(instanceState.Attributes, oldCountKey)
109 log.Printf("[STATE UPGRADE] Detected %s as a map. Replaced count = %s",
110 strings.TrimSuffix(prefix, "."), instanceState.Attributes[newCountKey])
111 }
112
113 // Now detect empty collections and remove them from state.
114 if len(actualKeysMatching) == 0 {
115 delete(instanceState.Attributes, oldCountKey)
116 log.Printf("[STATE UPGRADE] Detected %s as an empty collection. Removed from state.",
117 strings.TrimSuffix(prefix, "."))
118 }
119 }
120
121 return nil
122}
123
124// uniqueSortedStrings removes duplicates from a slice of strings and returns
125// a sorted slice of the unique strings.
126func uniqueSortedStrings(input []string) []string {
127 uniquemap := make(map[string]struct{})
128 for _, str := range input {
129 uniquemap[str] = struct{}{}
130 }
131
132 output := make([]string, len(uniquemap))
133
134 i := 0
135 for key := range uniquemap {
136 output[i] = key
137 i = i + 1
138 }
139
140 sort.Strings(output)
141 return output
142}
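
The pivotal move in upgradeAttributesV2ToV3 is renaming the count key of map-like collections from "<prefix>#" to "<prefix>%". A reduced sketch of the detection and rename over hypothetical flatmap data:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	// Hypothetical flatmap attributes for a map-typed "tags" attribute.
	attrs := map[string]string{
		"tags.#":    "2",
		"tags.Name": "web",
		"tags.Env":  "prod",
	}

	prefix := "tags."
	countKey := prefix + "#"

	// Non-numeric subkeys indicate a map rather than a list or set.
	hasNonNumericKeys := false
	for key := range attrs {
		if key == countKey {
			continue
		}
		sub := strings.TrimPrefix(key, prefix)
		if _, err := strconv.Atoi(sub); err != nil {
			hasNonNumericKeys = true
		}
	}

	// For maps, move the count from "tags.#" to "tags.%", just as the
	// upgrade function above does.
	if hasNonNumericKeys {
		attrs[prefix+"%"] = attrs[countKey]
		delete(attrs, countKey)
	}
	fmt.Println(attrs)
}
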
diff --git a/vendor/github.com/hashicorp/terraform/terraform/state_v1.go b/vendor/github.com/hashicorp/terraform/terraform/state_v1.go
new file mode 100644
index 0000000..68cffb4
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/state_v1.go
@@ -0,0 +1,145 @@
1package terraform
2
3// stateV1 keeps track of a snapshot state-of-the-world that Terraform
4// can use to keep track of what real world resources it is actually
5// managing.
6//
7// stateV1 is only used for the purposes of backwards compatibility
8// and is no longer used in Terraform.
9//
10// For the upgrade process, see state_upgrade_v1_to_v2.go
11type stateV1 struct {
12 // Version is the protocol version. "1" for a StateV1.
13 Version int `json:"version"`
14
15 // Serial is incremented on any operation that modifies
16 // the State file. It is used to detect potentially conflicting
17 // updates.
18 Serial int64 `json:"serial"`
19
20 // Remote is used to track the metadata required to
21 // pull and push state files from a remote storage endpoint.
22 Remote *remoteStateV1 `json:"remote,omitempty"`
23
24 // Modules contains all the modules in a breadth-first order
25 Modules []*moduleStateV1 `json:"modules"`
26}
27
28type remoteStateV1 struct {
29 // Type controls the client we use for the remote state
30 Type string `json:"type"`
31
32 // Config is used to store arbitrary configuration that
33 // is type specific
34 Config map[string]string `json:"config"`
35}
36
37type moduleStateV1 struct {
38 // Path is the import path from the root module. Module imports are
39 // always disjoint, so the path represents a module tree
40 Path []string `json:"path"`
41
42 // Outputs declared by the module and maintained for each module
43 // even though only the root module technically needs to be kept.
44 // This allows operators to inspect values at the boundaries.
45 Outputs map[string]string `json:"outputs"`
46
47 // Resources is a mapping of the logically named resource to
48 // the state of the resource. Each resource may actually have
49 // N instances underneath, although a user only needs to think
50 // about the 1:1 case.
51 Resources map[string]*resourceStateV1 `json:"resources"`
52
53 // Dependencies are a list of things that this module relies on
54 // existing to remain intact. For example: a module may depend
55 // on a VPC ID given by an aws_vpc resource.
56 //
57 // Terraform uses this information to build valid destruction
58 // orders and to warn the user if they're destroying a module that
59 // another resource depends on.
60 //
61 // Things can be put into this list that may not be managed by
62 // Terraform. If Terraform doesn't find a matching ID in the
63 // overall state, then it assumes it isn't managed and doesn't
64 // worry about it.
65 Dependencies []string `json:"depends_on,omitempty"`
66}
67
68type resourceStateV1 struct {
69 // This is filled in and managed by Terraform, and is the resource
70 // type itself such as "mycloud_instance". If a resource provider sets
71 // this value, it won't be persisted.
72 Type string `json:"type"`
73
74 // Dependencies are a list of things that this resource relies on
75 // existing to remain intact. For example: an AWS instance might
76 // depend on a subnet (which itself might depend on a VPC, and so
77 // on).
78 //
79 // Terraform uses this information to build valid destruction
80 // orders and to warn the user if they're destroying a resource that
81 // another resource depends on.
82 //
83 // Things can be put into this list that may not be managed by
84 // Terraform. If Terraform doesn't find a matching ID in the
85 // overall state, then it assumes it isn't managed and doesn't
86 // worry about it.
87 Dependencies []string `json:"depends_on,omitempty"`
88
89 // Primary is the current active instance for this resource.
90 // It can be replaced but only after a successful creation.
91 // This is the instance on which providers will act.
92 Primary *instanceStateV1 `json:"primary"`
93
94 // Tainted is used to track any underlying instances that
95 // have been created but are in a bad or unknown state and
96 // need to be cleaned up subsequently. In the
97 // standard case, there is only at most a single instance.
98 // However, in pathological cases, it is possible for the number
99 // of instances to accumulate.
100 Tainted []*instanceStateV1 `json:"tainted,omitempty"`
101
102 // Deposed is used in the mechanics of CreateBeforeDestroy: the existing
103 // Primary is Deposed to get it out of the way for the replacement Primary to
104 // be created by Apply. If the replacement Primary creates successfully, the
105 // Deposed instance is cleaned up. If there were problems creating the
106 // replacement, the instance remains in the Deposed list so it can be
107 // destroyed in a future run. Functionally, Deposed instances are very
108 // similar to Tainted instances in that Terraform is only tracking them in
109 // order to remember to destroy them.
110 Deposed []*instanceStateV1 `json:"deposed,omitempty"`
111
112 // Provider is used when a resource is connected to a provider with an alias.
113 // If this string is empty, the resource is connected to the default provider,
114 // e.g. "aws_instance" goes with the "aws" provider.
115 // If the resource block contained a "provider" key, that value will be set here.
116 Provider string `json:"provider,omitempty"`
117}
118
119type instanceStateV1 struct {
120 // A unique ID for this resource. This is opaque to Terraform
121 // and is only meant as a lookup mechanism for the providers.
122 ID string `json:"id"`
123
124 // Attributes are basic information about the resource. Any keys here
125 // are accessible in variable format within Terraform configurations:
126 // ${resourcetype.name.attribute}.
127 Attributes map[string]string `json:"attributes,omitempty"`
128
129 // Ephemeral is used to store any state associated with this instance
130 // that is necessary for the Terraform run to complete, but is not
131 // persisted to a state file.
132 Ephemeral ephemeralStateV1 `json:"-"`
133
134 // Meta is a simple K/V map that is persisted to the State but otherwise
135 // ignored by Terraform core. It's meant to be used for accounting by
136 // external client code.
137 Meta map[string]string `json:"meta,omitempty"`
138}
139
140type ephemeralStateV1 struct {
141 // ConnInfo is used for the providers to export information which is
142 // used to connect to the resource for provisioning. For example,
143 // this could contain SSH or WinRM credentials.
144 ConnInfo map[string]string `json:"-"`
145}
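
Because these structs are unexported, a concrete document is the quickest way to see the schema. A sketch that unmarshals a minimal, hypothetical V1 state through local mirrors of the fields used here:

package main

import (
	"encoding/json"
	"fmt"
)

// Local mirrors of the unexported V1 structs, reduced to the fields
// this example exercises.
type v1State struct {
	Version int         `json:"version"`
	Serial  int64       `json:"serial"`
	Modules []*v1Module `json:"modules"`
}

type v1Module struct {
	Path      []string               `json:"path"`
	Outputs   map[string]string      `json:"outputs"`
	Resources map[string]*v1Resource `json:"resources"`
}

type v1Resource struct {
	Type    string      `json:"type"`
	Primary *v1Instance `json:"primary"`
}

type v1Instance struct {
	ID         string            `json:"id"`
	Attributes map[string]string `json:"attributes,omitempty"`
}

func main() {
	// A hypothetical, minimal V1 state document.
	doc := []byte(`{
		"version": 1,
		"serial": 7,
		"modules": [{
			"path": ["root"],
			"outputs": {"instance_ip": "10.0.0.5"},
			"resources": {
				"aws_instance.web": {
					"type": "aws_instance",
					"primary": {"id": "i-abc123"}
				}
			}
		}]
	}`)

	var s v1State
	if err := json.Unmarshal(doc, &s); err != nil {
		panic(err)
	}
	fmt.Printf("version=%d serial=%d modules=%d\n", s.Version, s.Serial, len(s.Modules))
}
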
diff --git a/vendor/github.com/hashicorp/terraform/terraform/testing.go b/vendor/github.com/hashicorp/terraform/terraform/testing.go
new file mode 100644
index 0000000..3f0418d
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/testing.go
@@ -0,0 +1,19 @@
1package terraform
2
3import (
4 "os"
5 "testing"
6)
7
8// TestStateFile writes the given state to the path.
9func TestStateFile(t *testing.T, path string, state *State) {
10 f, err := os.Create(path)
11 if err != nil {
12 t.Fatalf("err: %s", err)
13 }
14 defer f.Close()
15
16 if err := WriteState(state, f); err != nil {
17 t.Fatalf("err: %s", err)
18 }
19}
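
A sketch of typical use from a test; the import path and the NewState helper are assumed from the vendored package:

package terraform_test

import (
	"io/ioutil"
	"os"
	"path/filepath"
	"testing"

	"github.com/hashicorp/terraform/terraform"
)

func TestConsumesStateFile(t *testing.T) {
	dir, err := ioutil.TempDir("", "tfstate")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(dir)

	// Write an initialized state to disk for the code under test to read.
	path := filepath.Join(dir, "terraform.tfstate")
	terraform.TestStateFile(t, path, terraform.NewState())

	// ... exercise code that consumes the file at path ...
}
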
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform.go b/vendor/github.com/hashicorp/terraform/terraform/transform.go
new file mode 100644
index 0000000..f4a431a
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform.go
@@ -0,0 +1,52 @@
1package terraform
2
3import (
4 "github.com/hashicorp/terraform/dag"
5)
6
7// GraphTransformer is the interface that transformers implement. This
8// interface is only for transforms that need entire graph visibility.
9type GraphTransformer interface {
10 Transform(*Graph) error
11}
12
13// GraphVertexTransformer is an interface that transforms a single
14// Vertex within the graph. This is a specialization of GraphTransformer
15// that makes it easy to do vertex replacement.
16//
17// The GraphTransformer that runs through the GraphVertexTransformers is
18// VertexTransformer.
19type GraphVertexTransformer interface {
20 Transform(dag.Vertex) (dag.Vertex, error)
21}
22
23// GraphTransformIf is a helper function that returns the given
24// GraphTransformer only if the condition f evaluates to true. This is useful
25// for composing a sequence of transforms inline without multiple append() calls.
26func GraphTransformIf(f func() bool, then GraphTransformer) GraphTransformer {
27 if f() {
28 return then
29 }
30
31 return nil
32}
33
34type graphTransformerMulti struct {
35 Transforms []GraphTransformer
36}
37
38func (t *graphTransformerMulti) Transform(g *Graph) error {
39 for _, t := range t.Transforms {
40 if err := t.Transform(g); err != nil {
41 return err
42 }
43 }
44
45 return nil
46}
47
48// GraphTransformMulti combines multiple graph transformers into a single
49// GraphTransformer that runs all the individual graph transformers.
50func GraphTransformMulti(ts ...GraphTransformer) GraphTransformer {
51 return &graphTransformerMulti{Transforms: ts}
52}
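
A sketch of composing steps with GraphTransformMulti; mod would normally be a loaded *module.Tree, and the combined transformer runs each step in order, stopping at the first error:

package main

import (
	"github.com/hashicorp/terraform/config/module"
	"github.com/hashicorp/terraform/terraform"
)

func buildGraph(mod *module.Tree) (*terraform.Graph, error) {
	transformer := terraform.GraphTransformMulti(
		&terraform.ConfigTransformer{Module: mod},
		&terraform.AttachResourceConfigTransformer{Module: mod},
	)

	var g terraform.Graph
	if err := transformer.Transform(&g); err != nil {
		return nil, err
	}
	return &g, nil
}

func main() {
	// With a nil module both steps are no-ops; in real use mod comes
	// from loading a configuration tree.
	if _, err := buildGraph(nil); err != nil {
		panic(err)
	}
}
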
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_provider.go b/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_provider.go
new file mode 100644
index 0000000..10506ea
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_provider.go
@@ -0,0 +1,80 @@
1package terraform
2
3import (
4 "log"
5
6 "github.com/hashicorp/terraform/config"
7 "github.com/hashicorp/terraform/config/module"
8)
9
10// GraphNodeAttachProvider is an interface that must be implemented by nodes
11// that want provider configurations attached.
12type GraphNodeAttachProvider interface {
13 // Must be implemented to determine the path for the configuration
14 GraphNodeSubPath
15
16 // ProviderName with no module prefix. Example: "aws".
17 ProviderName() string
18
19 // Sets the configuration
20 AttachProvider(*config.ProviderConfig)
21}
22
23// AttachProviderConfigTransformer goes through the graph and attaches
24// provider configuration structures to nodes that implement the interfaces
25// above.
26//
27// The attached configuration structures are directly from the configuration.
28// If they're going to be modified, a copy should be made.
29type AttachProviderConfigTransformer struct {
30 Module *module.Tree // Module is the root module for the config
31}
32
33func (t *AttachProviderConfigTransformer) Transform(g *Graph) error {
34 if err := t.attachProviders(g); err != nil {
35 return err
36 }
37
38 return nil
39}
40
41func (t *AttachProviderConfigTransformer) attachProviders(g *Graph) error {
42 // Go through and find GraphNodeAttachProvider
43 for _, v := range g.Vertices() {
44 // Only care about GraphNodeAttachProvider implementations
45 apn, ok := v.(GraphNodeAttachProvider)
46 if !ok {
47 continue
48 }
49
50 // Determine what we're looking for
51 path := normalizeModulePath(apn.Path())
52 path = path[1:]
53 name := apn.ProviderName()
54 log.Printf("[TRACE] Attach provider request: %#v %s", path, name)
55
56 // Get the configuration.
57 tree := t.Module.Child(path)
58 if tree == nil {
59 continue
60 }
61
62 // Go through the provider configs to find the matching config
63 for _, p := range tree.Config().ProviderConfigs {
64 // Build the name, which is "name.alias" if an alias exists
65 current := p.Name
66 if p.Alias != "" {
67 current += "." + p.Alias
68 }
69
70 // If the configs match then attach!
71 if current == name {
72 log.Printf("[TRACE] Attaching provider config: %#v", p)
73 apn.AttachProvider(p)
74 break
75 }
76 }
77 }
78
79 return nil
80}
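
A minimal sketch of a vertex type that satisfies GraphNodeAttachProvider; the node and its fixed provider name are hypothetical, in-package illustration only:

package terraform

import "github.com/hashicorp/terraform/config"

// exampleProviderNode is a hypothetical vertex that wants the "aws"
// provider configuration attached by AttachProviderConfigTransformer.
type exampleProviderNode struct {
	PathValue []string
	Config    *config.ProviderConfig
}

// GraphNodeSubPath
func (n *exampleProviderNode) Path() []string { return n.PathValue }

// GraphNodeAttachProvider
func (n *exampleProviderNode) ProviderName() string { return "aws" }
func (n *exampleProviderNode) AttachProvider(c *config.ProviderConfig) {
	n.Config = c
}
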
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_resource.go b/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_resource.go
new file mode 100644
index 0000000..f2ee37e
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_resource.go
@@ -0,0 +1,78 @@
1package terraform
2
3import (
4 "fmt"
5 "log"
6
7 "github.com/hashicorp/terraform/config"
8 "github.com/hashicorp/terraform/config/module"
9)
10
11// GraphNodeAttachResourceConfig is an interface that must be implemented by nodes
12// that want resource configurations attached.
13type GraphNodeAttachResourceConfig interface {
14 // ResourceAddr is the address to the resource
15 ResourceAddr() *ResourceAddress
16
17 // Sets the configuration
18 AttachResourceConfig(*config.Resource)
19}
20
21// AttachResourceConfigTransformer goes through the graph and attaches
22// resource configuration structures to nodes that implement the interfaces
23// above.
24//
25// The attached configuration structures are directly from the configuration.
26// If they're going to be modified, a copy should be made.
27type AttachResourceConfigTransformer struct {
28 Module *module.Tree // Module is the root module for the config
29}
30
31func (t *AttachResourceConfigTransformer) Transform(g *Graph) error {
32 log.Printf("[TRACE] AttachResourceConfigTransformer: Beginning...")
33
34 // Go through and find GraphNodeAttachResource
35 for _, v := range g.Vertices() {
36 // Only care about GraphNodeAttachResource implementations
37 arn, ok := v.(GraphNodeAttachResourceConfig)
38 if !ok {
39 continue
40 }
41
42 // Determine what we're looking for
43 addr := arn.ResourceAddr()
44 log.Printf(
45 "[TRACE] AttachResourceConfigTransformer: Attach resource "+
46 "config request: %s", addr)
47
48 // Get the configuration.
49 path := normalizeModulePath(addr.Path)
50 path = path[1:]
51 tree := t.Module.Child(path)
52 if tree == nil {
53 continue
54 }
55
56 // Go through the resource configs to find the matching config
57 for _, r := range tree.Config().Resources {
58 // Get a resource address so we can compare
59 a, err := parseResourceAddressConfig(r)
60 if err != nil {
61 panic(fmt.Sprintf(
62 "Error parsing config address, this is a bug: %#v", r))
63 }
64 a.Path = addr.Path
65
66 // If this is not the same resource, then continue
67 if !a.Equals(addr) {
68 continue
69 }
70
71 log.Printf("[TRACE] Attaching resource config: %#v", r)
72 arn.AttachResourceConfig(r)
73 break
74 }
75 }
76
77 return nil
78}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_attach_state.go b/vendor/github.com/hashicorp/terraform/terraform/transform_attach_state.go
new file mode 100644
index 0000000..564ff08
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_attach_state.go
@@ -0,0 +1,68 @@
1package terraform
2
3import (
4 "log"
5
6 "github.com/hashicorp/terraform/dag"
7)
8
9// GraphNodeAttachResourceState is an interface that can be implemented
10// to request that a ResourceState is attached to the node.
11type GraphNodeAttachResourceState interface {
12 // The address to the resource for the state
13 ResourceAddr() *ResourceAddress
14
15 // Sets the state
16 AttachResourceState(*ResourceState)
17}
18
19// AttachStateTransformer goes through the graph and attaches
20// state to nodes that implement the interfaces above.
21type AttachStateTransformer struct {
22 State *State // State is the root state
23}
24
25func (t *AttachStateTransformer) Transform(g *Graph) error {
26 // If no state, then nothing to do
27 if t.State == nil {
28 log.Printf("[DEBUG] Not attaching any state: state is nil")
29 return nil
30 }
31
32 filter := &StateFilter{State: t.State}
33 for _, v := range g.Vertices() {
34 // Only care about nodes requesting we're adding state
35 an, ok := v.(GraphNodeAttachResourceState)
36 if !ok {
37 continue
38 }
39 addr := an.ResourceAddr()
40
41 // Get the module state
42 results, err := filter.Filter(addr.String())
43 if err != nil {
44 return err
45 }
46
47 // Attach the first resource state we get
48 found := false
49 for _, result := range results {
50 if rs, ok := result.Value.(*ResourceState); ok {
51 log.Printf(
52 "[DEBUG] Attaching resource state to %q: %#v",
53 dag.VertexName(v), rs)
54 an.AttachResourceState(rs)
55 found = true
56 break
57 }
58 }
59
60 if !found {
61 log.Printf(
62 "[DEBUG] Resource state not found for %q: %s",
63 dag.VertexName(v), addr)
64 }
65 }
66
67 return nil
68}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_config.go b/vendor/github.com/hashicorp/terraform/terraform/transform_config.go
new file mode 100644
index 0000000..61bce85
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_config.go
@@ -0,0 +1,135 @@
1package terraform
2
3import (
4 "errors"
5 "fmt"
6 "log"
7 "sync"
8
9 "github.com/hashicorp/terraform/config"
10 "github.com/hashicorp/terraform/config/module"
11 "github.com/hashicorp/terraform/dag"
12)
13
14// ConfigTransformer is a GraphTransformer that adds all the resources
15// from the configuration to the graph.
16//
17// The module used to configure this transformer must be the root module.
18//
19// Only resources are added to the graph. Variables, outputs, and
20// providers must be added via other transforms.
21//
22// Unlike ConfigTransformerOld, this transformer creates a graph with
23// all resources including module resources, rather than creating module
24// nodes that are then "flattened".
25type ConfigTransformer struct {
26 Concrete ConcreteResourceNodeFunc
27
28 // Module is the module to add resources from.
29 Module *module.Tree
30
31 // Unique will only add resources that aren't already present in the graph.
32 Unique bool
33
34 // ModeFilter, when true, restricts added resources to those matching Mode
35 ModeFilter bool
36 Mode config.ResourceMode
37
38 l sync.Mutex
39 uniqueMap map[string]struct{}
40}
41
42func (t *ConfigTransformer) Transform(g *Graph) error {
43 // Lock since we use some internal state
44 t.l.Lock()
45 defer t.l.Unlock()
46
47 // If no module is given, we don't do anything
48 if t.Module == nil {
49 return nil
50 }
51
52 // If the module isn't loaded, that is simply an error
53 if !t.Module.Loaded() {
54 return errors.New("module must be loaded for ConfigTransformer")
55 }
56
57 // Reset the uniqueness map. If we're tracking uniques, then populate
58 // it with addresses.
59 t.uniqueMap = make(map[string]struct{})
60 defer func() { t.uniqueMap = nil }()
61 if t.Unique {
62 for _, v := range g.Vertices() {
63 if rn, ok := v.(GraphNodeResource); ok {
64 t.uniqueMap[rn.ResourceAddr().String()] = struct{}{}
65 }
66 }
67 }
68
69 // Start the transformation process
70 return t.transform(g, t.Module)
71}
72
73func (t *ConfigTransformer) transform(g *Graph, m *module.Tree) error {
74 // If no config, do nothing
75 if m == nil {
76 return nil
77 }
78
79 // Add our resources
80 if err := t.transformSingle(g, m); err != nil {
81 return err
82 }
83
84 // Transform all the children.
85 for _, c := range m.Children() {
86 if err := t.transform(g, c); err != nil {
87 return err
88 }
89 }
90
91 return nil
92}
93
94func (t *ConfigTransformer) transformSingle(g *Graph, m *module.Tree) error {
95 log.Printf("[TRACE] ConfigTransformer: Starting for path: %v", m.Path())
96
97 // Get the configuration for this module
98 conf := m.Config()
99
100 // Build the path we're at
101 path := m.Path()
102
103 // Write all the resources out
104 for _, r := range conf.Resources {
105 // Build the resource address
106 addr, err := parseResourceAddressConfig(r)
107 if err != nil {
108 panic(fmt.Sprintf(
109 "Error parsing config address, this is a bug: %#v", r))
110 }
111 addr.Path = path
112
113 // If this is already in our uniqueness map, don't add it again
114 if _, ok := t.uniqueMap[addr.String()]; ok {
115 continue
116 }
117
118 // Remove non-matching modes
119 if t.ModeFilter && addr.Mode != t.Mode {
120 continue
121 }
122
123 // Build the abstract node and the concrete one
124 abstract := &NodeAbstractResource{Addr: addr}
125 var node dag.Vertex = abstract
126 if f := t.Concrete; f != nil {
127 node = f(abstract)
128 }
129
130 // Add it to the graph
131 g.Add(node)
132 }
133
134 return nil
135}
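
A sketch of the Concrete hook, in-package; NodeApplyableResource is assumed to be this vintage's applyable node type:

package terraform

import (
	"github.com/hashicorp/terraform/config/module"
	"github.com/hashicorp/terraform/dag"
)

// newApplyConfigTransformer shows the usual pattern: the Concrete
// callback wraps every abstract resource node in a concrete node type
// chosen by the caller.
func newApplyConfigTransformer(mod *module.Tree) GraphTransformer {
	concrete := func(a *NodeAbstractResource) dag.Vertex {
		return &NodeApplyableResource{NodeAbstractResource: a}
	}
	return &ConfigTransformer{
		Concrete: concrete,
		Module:   mod,
	}
}
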
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_config_flat.go b/vendor/github.com/hashicorp/terraform/terraform/transform_config_flat.go
new file mode 100644
index 0000000..92f9888
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_config_flat.go
@@ -0,0 +1,80 @@
1package terraform
2
3import (
4 "errors"
5
6 "github.com/hashicorp/terraform/config/module"
7 "github.com/hashicorp/terraform/dag"
8)
9
10// FlatConfigTransformer is a GraphTransformer that adds the configuration
11// to the graph. The module used to configure this transformer must be
12// the root module.
13//
14// This transform adds the nodes but doesn't connect any of the references.
15// The ReferenceTransformer should be used for that.
16//
17// NOTE: In relation to ConfigTransformer: this is a newer generation config
18// transformer. It puts the _entire_ config into the graph (there is no
19// "flattening" step as before).
20type FlatConfigTransformer struct {
21 Concrete ConcreteResourceNodeFunc // What to turn resources into
22
23 Module *module.Tree
24}
25
26func (t *FlatConfigTransformer) Transform(g *Graph) error {
27 // If no module, we do nothing
28 if t.Module == nil {
29 return nil
30 }
31
32 // If the module is not loaded, that is an error
33 if !t.Module.Loaded() {
34 return errors.New("module must be loaded")
35 }
36
37 return t.transform(g, t.Module)
38}
39
40func (t *FlatConfigTransformer) transform(g *Graph, m *module.Tree) error {
41 // If no module, no problem
42 if m == nil {
43 return nil
44 }
45
46 // Transform all the children.
47 for _, c := range m.Children() {
48 if err := t.transform(g, c); err != nil {
49 return err
50 }
51 }
52
53 // Get the configuration for this module
54 config := m.Config()
55
56 // Write all the resources out
57 for _, r := range config.Resources {
58 // Grab the address for this resource
59 addr, err := parseResourceAddressConfig(r)
60 if err != nil {
61 return err
62 }
63 addr.Path = m.Path()
64
65 // Build the abstract resource. We have the config already so
66 // we'll just pre-populate that.
67 abstract := &NodeAbstractResource{
68 Addr: addr,
69 Config: r,
70 }
71 var node dag.Vertex = abstract
72 if f := t.Concrete; f != nil {
73 node = f(abstract)
74 }
75
76 g.Add(node)
77 }
78
79 return nil
80}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_config_old.go b/vendor/github.com/hashicorp/terraform/terraform/transform_config_old.go
new file mode 100644
index 0000000..ec41258
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_config_old.go
@@ -0,0 +1,23 @@
1package terraform
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/terraform/config"
7)
8
9// varNameForVar returns the VarName value for an interpolated variable.
10// This value is compared to the VarName() value for the nodes within the
11// graph to build the graph edges.
12func varNameForVar(raw config.InterpolatedVariable) string {
13 switch v := raw.(type) {
14 case *config.ModuleVariable:
15 return fmt.Sprintf("module.%s.output.%s", v.Name, v.Field)
16 case *config.ResourceVariable:
17 return v.ResourceId()
18 case *config.UserVariable:
19 return fmt.Sprintf("var.%s", v.Name)
20 default:
21 return ""
22 }
23}
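
The mapping is easiest to see with concrete inputs; a small in-package sketch (varNameForVar is unexported), with hypothetical variable values:

package terraform

import (
	"fmt"

	"github.com/hashicorp/terraform/config"
)

// exampleVarNames shows the name produced for each interpolated-variable kind.
func exampleVarNames() {
	mv := &config.ModuleVariable{Name: "net", Field: "vpc_id"}
	uv := &config.UserVariable{Name: "region"}

	fmt.Println(varNameForVar(mv)) // "module.net.output.vpc_id"
	fmt.Println(varNameForVar(uv)) // "var.region"
	// A *config.ResourceVariable yields its ResourceId(),
	// e.g. "aws_instance.web".
}
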
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_count_boundary.go b/vendor/github.com/hashicorp/terraform/terraform/transform_count_boundary.go
new file mode 100644
index 0000000..83415f3
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_count_boundary.go
@@ -0,0 +1,28 @@
1package terraform
2
3import (
4 "github.com/hashicorp/terraform/dag"
5)
6
7// CountBoundaryTransformer adds a node that depends on everything else
8// so that it runs last in order to clean up the state for nodes that
9// are on the "count boundary": "foo.0" when only one exists becomes "foo"
10type CountBoundaryTransformer struct{}
11
12func (t *CountBoundaryTransformer) Transform(g *Graph) error {
13 node := &NodeCountBoundary{}
14 g.Add(node)
15
16 // Depends on everything
17 for _, v := range g.Vertices() {
18 // Don't connect to ourselves
19 if v == node {
20 continue
21 }
22
23 // Connect!
24 g.Connect(dag.BasicEdge(node, v))
25 }
26
27 return nil
28}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_deposed.go b/vendor/github.com/hashicorp/terraform/terraform/transform_deposed.go
new file mode 100644
index 0000000..2148cef
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_deposed.go
@@ -0,0 +1,168 @@
1package terraform
2
3import "fmt"
4
5// DeposedTransformer is a GraphTransformer that adds deposed resources
6// to the graph.
7type DeposedTransformer struct {
8 // State is the global state. We'll automatically find the correct
9 // ModuleState based on the Graph.Path that is being transformed.
10 State *State
11
12 // View, if non-empty, is the ModuleState.View used around the state
13 // to find deposed resources.
14 View string
15}
16
17func (t *DeposedTransformer) Transform(g *Graph) error {
18 state := t.State.ModuleByPath(g.Path)
19 if state == nil {
20 // If there is no state for our module there can't be any deposed
21 // resources, since they live in the state.
22 return nil
23 }
24
25 // If we have a view, apply it now
26 if t.View != "" {
27 state = state.View(t.View)
28 }
29
30 // Go through all the resources in our state to look for deposed resources
31 for k, rs := range state.Resources {
32 // If we have no deposed resources, then move on
33 if len(rs.Deposed) == 0 {
34 continue
35 }
36 deposed := rs.Deposed
37
38 for i := range deposed {
39 g.Add(&graphNodeDeposedResource{
40 Index: i,
41 ResourceName: k,
42 ResourceType: rs.Type,
43 Provider: rs.Provider,
44 })
45 }
46 }
47
48 return nil
49}
50
51// graphNodeDeposedResource is the graph vertex representing a deposed resource.
52type graphNodeDeposedResource struct {
53 Index int
54 ResourceName string
55 ResourceType string
56 Provider string
57}
58
59func (n *graphNodeDeposedResource) Name() string {
60 return fmt.Sprintf("%s (deposed #%d)", n.ResourceName, n.Index)
61}
62
63func (n *graphNodeDeposedResource) ProvidedBy() []string {
64 return []string{resourceProvider(n.ResourceName, n.Provider)}
65}
66
67// GraphNodeEvalable impl.
68func (n *graphNodeDeposedResource) EvalTree() EvalNode {
69 var provider ResourceProvider
70 var state *InstanceState
71
72 seq := &EvalSequence{Nodes: make([]EvalNode, 0, 5)}
73
74 // Build instance info
75 info := &InstanceInfo{Id: n.Name(), Type: n.ResourceType}
76 seq.Nodes = append(seq.Nodes, &EvalInstanceInfo{Info: info})
77
78 // Refresh the resource
79 seq.Nodes = append(seq.Nodes, &EvalOpFilter{
80 Ops: []walkOperation{walkRefresh},
81 Node: &EvalSequence{
82 Nodes: []EvalNode{
83 &EvalGetProvider{
84 Name: n.ProvidedBy()[0],
85 Output: &provider,
86 },
87 &EvalReadStateDeposed{
88 Name: n.ResourceName,
89 Output: &state,
90 Index: n.Index,
91 },
92 &EvalRefresh{
93 Info: info,
94 Provider: &provider,
95 State: &state,
96 Output: &state,
97 },
98 &EvalWriteStateDeposed{
99 Name: n.ResourceName,
100 ResourceType: n.ResourceType,
101 Provider: n.Provider,
102 State: &state,
103 Index: n.Index,
104 },
105 },
106 },
107 })
108
109 // Apply
110 var diff *InstanceDiff
111 var err error
112 seq.Nodes = append(seq.Nodes, &EvalOpFilter{
113 Ops: []walkOperation{walkApply, walkDestroy},
114 Node: &EvalSequence{
115 Nodes: []EvalNode{
116 &EvalGetProvider{
117 Name: n.ProvidedBy()[0],
118 Output: &provider,
119 },
120 &EvalReadStateDeposed{
121 Name: n.ResourceName,
122 Output: &state,
123 Index: n.Index,
124 },
125 &EvalDiffDestroy{
126 Info: info,
127 State: &state,
128 Output: &diff,
129 },
130 // Call pre-apply hook
131 &EvalApplyPre{
132 Info: info,
133 State: &state,
134 Diff: &diff,
135 },
136 &EvalApply{
137 Info: info,
138 State: &state,
139 Diff: &diff,
140 Provider: &provider,
141 Output: &state,
142 Error: &err,
143 },
144 // Always write the resource back to the state deposed... if it
145 // was successfully destroyed it will be pruned. If it was not, it will
146 // be caught on the next run.
147 &EvalWriteStateDeposed{
148 Name: n.ResourceName,
149 ResourceType: n.ResourceType,
150 Provider: n.Provider,
151 State: &state,
152 Index: n.Index,
153 },
154 &EvalApplyPost{
155 Info: info,
156 State: &state,
157 Error: &err,
158 },
159 &EvalReturnError{
160 Error: &err,
161 },
162 &EvalUpdateStateHook{},
163 },
164 },
165 })
166
167 return seq
168}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_cbd.go b/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_cbd.go
new file mode 100644
index 0000000..edfb460
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_cbd.go
@@ -0,0 +1,257 @@
1package terraform
2
3import (
4 "fmt"
5 "log"
6
7 "github.com/hashicorp/terraform/config/module"
8 "github.com/hashicorp/terraform/dag"
9)
10
11// GraphNodeDestroyerCBD must be implemented by nodes that might be
12// create-before-destroy destroyers.
13type GraphNodeDestroyerCBD interface {
14 GraphNodeDestroyer
15
16 // CreateBeforeDestroy returns true if this node represents a node
17 // that is doing a CBD.
18 CreateBeforeDestroy() bool
19
20 // ModifyCreateBeforeDestroy is called when the CBD state of a node
21 // is changed dynamically. This can return an error if this isn't
22 // allowed.
23 ModifyCreateBeforeDestroy(bool) error
24}
25
26// CBDEdgeTransformer modifies the edges of CBD nodes that went through
27// the DestroyEdgeTransformer to have the right dependencies. There are
28// two real tasks here:
29//
30// 1. With CBD, the destroy edge is inverted: the destroy depends on
31// the creation.
32//
33// 2. A_d must depend on resources that depend on A. This is to enable
34// the destroy to only happen once nodes that depend on A successfully
35// update to A. Example: adding a web server updates the load balancer
36// before deleting the old web server.
37//
38type CBDEdgeTransformer struct {
39 // Module and State are only needed to look up dependencies in
40 // any way possible. Either can be nil if not available.
41 Module *module.Tree
42 State *State
43}
44
45func (t *CBDEdgeTransformer) Transform(g *Graph) error {
46 log.Printf("[TRACE] CBDEdgeTransformer: Beginning CBD transformation...")
47
48 // Go through and reverse any destroy edges
49 destroyMap := make(map[string][]dag.Vertex)
50 for _, v := range g.Vertices() {
51 dn, ok := v.(GraphNodeDestroyerCBD)
52 if !ok {
53 continue
54 }
55
56 if !dn.CreateBeforeDestroy() {
57 // If there are no CBD ancestors (dependent nodes), then we
58 // do nothing here.
59 if !t.hasCBDAncestor(g, v) {
60 continue
61 }
62
63 // If this isn't naturally a CBD node, this means that an ancestor is
64 // and we need to auto-upgrade this node to CBD. We do this because
65 // a CBD node depending on non-CBD will result in cycles. To avoid this,
66 // we always attempt to upgrade it.
67 if err := dn.ModifyCreateBeforeDestroy(true); err != nil {
68 return fmt.Errorf(
69 "%s: must have create before destroy enabled because "+
70 "a dependent resource has CBD enabled. However, when "+
71 "attempting to automatically do this, an error occurred: %s",
72 dag.VertexName(v), err)
73 }
74 }
75
76 // Find the destroy edge. There should only be one.
77 for _, e := range g.EdgesTo(v) {
78 // Not a destroy edge, ignore it
79 de, ok := e.(*DestroyEdge)
80 if !ok {
81 continue
82 }
83
84 log.Printf("[TRACE] CBDEdgeTransformer: inverting edge: %s => %s",
85 dag.VertexName(de.Source()), dag.VertexName(de.Target()))
86
87 // Found it! Invert.
88 g.RemoveEdge(de)
89 g.Connect(&DestroyEdge{S: de.Target(), T: de.Source()})
90 }
91
92 // If the address has an index, we strip that. Our depMap creation
93 // graph doesn't expand counts so we don't currently get _exact_
94 // dependencies. One day when we limit dependencies more exactly
95 // this will have to change. We have a test case covering this
96 // (depNonCBDCountBoth) so it'll be caught.
97 addr := dn.DestroyAddr()
98 if addr.Index >= 0 {
99 addr = addr.Copy() // Copy so that we don't modify any pointers
100 addr.Index = -1
101 }
102
103 // Add this to the list of nodes that we need to fix up
104 // the edges for (step 2 above in the docs).
105 key := addr.String()
106 destroyMap[key] = append(destroyMap[key], v)
107 }
108
109 // If we have no CBD nodes, then our work here is done
110 if len(destroyMap) == 0 {
111 return nil
112 }
113
114 // We have CBD nodes. We now have to move on to the much more difficult
115 // task of connecting dependencies of the creation side of the destroy
116 // to the destruction node. The easiest way to explain this is an example:
117 //
118 // Given a pre-destroy dependence of: A => B
119 // And A has CBD set.
120 //
121 // The resulting graph should be: A => B => A_d
122 //
123 // The key here is that B happens before A is destroyed. This is to
124 // facilitate the primary purpose for CBD: making sure that downstreams
125 // are properly updated to avoid downtime before the resource is destroyed.
126 //
127 // We can't trust that the resource being destroyed or anything that
128 // depends on it is actually in our current graph so we make a new
129 // graph in order to determine those dependencies and add them in.
130 log.Printf("[TRACE] CBDEdgeTransformer: building graph to find dependencies...")
131 depMap, err := t.depMap(destroyMap)
132 if err != nil {
133 return err
134 }
135
136 // We now have the mapping of resource addresses to the destroy
137 // nodes they need to depend on. We now go through our own vertices to
138 // find any matching these addresses and make the connection.
139 for _, v := range g.Vertices() {
140 // We're looking for creators
141 rn, ok := v.(GraphNodeCreator)
142 if !ok {
143 continue
144 }
145
146 // Get the address
147 addr := rn.CreateAddr()
148
149 // If the address has an index, we strip that. Our depMap creation
150 // graph doesn't expand counts so we don't currently get _exact_
151 // dependencies. One day when we limit dependencies more exactly
152 // this will have to change. We have a test case covering this
153 // (depNonCBDCount) so it'll be caught.
154 if addr.Index >= 0 {
155 addr = addr.Copy() // Copy so that we don't modify any pointers
156 addr.Index = -1
157 }
158
159 // If there is nothing this resource should depend on, ignore it
160 key := addr.String()
161 dns, ok := depMap[key]
162 if !ok {
163 continue
164 }
165
166 // We have nodes! Make the connection
167 for _, dn := range dns {
168 log.Printf("[TRACE] CBDEdgeTransformer: destroy depends on dependence: %s => %s",
169 dag.VertexName(dn), dag.VertexName(v))
170 g.Connect(dag.BasicEdge(dn, v))
171 }
172 }
173
174 return nil
175}
176
177func (t *CBDEdgeTransformer) depMap(
178 destroyMap map[string][]dag.Vertex) (map[string][]dag.Vertex, error) {
179 // Build the graph of our config, this ensures that all resources
180 // are present in the graph.
181 g, err := (&BasicGraphBuilder{
182 Steps: []GraphTransformer{
183 &FlatConfigTransformer{Module: t.Module},
184 &AttachResourceConfigTransformer{Module: t.Module},
185 &AttachStateTransformer{State: t.State},
186 &ReferenceTransformer{},
187 },
188 Name: "CBDEdgeTransformer",
189 }).Build(nil)
190 if err != nil {
191 return nil, err
192 }
193
194 // Using this graph, build the list of destroy nodes that each resource
195 // address should depend on. For example, when we find B, we map the
196 // address of B to A_d in the "depMap" variable below.
197 depMap := make(map[string][]dag.Vertex)
198 for _, v := range g.Vertices() {
199 // We're looking for resources.
200 rn, ok := v.(GraphNodeResource)
201 if !ok {
202 continue
203 }
204
205 // Get the address
206 addr := rn.ResourceAddr()
207 key := addr.String()
208
209 // Get the destroy nodes that are destroying this resource.
210 // If there aren't any, then we don't need to worry about
211 // any connections.
212 dns, ok := destroyMap[key]
213 if !ok {
214 continue
215 }
216
217 // Get the nodes that depend on this one. In the example above:
218 // finding B in A => B.
219 for _, v := range g.UpEdges(v).List() {
220 // We're looking for resources.
221 rn, ok := v.(GraphNodeResource)
222 if !ok {
223 continue
224 }
225
226 // Keep track of the destroy nodes that this address
227 // needs to depend on.
228 key := rn.ResourceAddr().String()
229 depMap[key] = append(depMap[key], dns...)
230 }
231 }
232
233 return depMap, nil
234}
235
236// hasCBDAncestor returns true if any ancestor (node that depends on this)
237// has CBD set.
238func (t *CBDEdgeTransformer) hasCBDAncestor(g *Graph, v dag.Vertex) bool {
239 s, _ := g.Ancestors(v)
240 if s == nil {
241 return true
242 }
243
244 for _, v := range s.List() {
245 dn, ok := v.(GraphNodeDestroyerCBD)
246 if !ok {
247 continue
248 }
249
250 if dn.CreateBeforeDestroy() {
251 // some ancestor is CreateBeforeDestroy, so we need to follow suit
252 return true
253 }
254 }
255
256 return false
257}
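
The inversion in task 1 can be seen on a toy graph. A sketch using the dag package directly, with string vertices; an edge here means "source depends on target":

package main

import (
	"fmt"

	"github.com/hashicorp/terraform/dag"
)

func main() {
	var g dag.Graph
	g.Add("A")           // create
	g.Add("A (destroy)") // destroy

	// Default (non-CBD) order: the replacement is created only after
	// the old instance is destroyed, i.e. A depends on A (destroy).
	edge := dag.BasicEdge("A", "A (destroy)")
	g.Connect(edge)

	// With create-before-destroy the transformer removes that edge and
	// re-adds it inverted: the destroy now waits on the create.
	g.RemoveEdge(edge)
	g.Connect(dag.BasicEdge("A (destroy)", "A"))

	fmt.Println(g.String())
}
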
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_edge.go b/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_edge.go
new file mode 100644
index 0000000..22be1ab
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_edge.go
@@ -0,0 +1,269 @@
1package terraform
2
3import (
4 "log"
5
6 "github.com/hashicorp/terraform/config/module"
7 "github.com/hashicorp/terraform/dag"
8)
9
10// GraphNodeDestroyer must be implemented by nodes that destroy resources.
11type GraphNodeDestroyer interface {
12 dag.Vertex
13
14 // ResourceAddr is the address of the resource that is being
15 // destroyed by this node. If this returns nil, then this node
16 // is not destroying anything.
17 DestroyAddr() *ResourceAddress
18}
19
20// GraphNodeCreator must be implemented by nodes that create OR update resources.
21type GraphNodeCreator interface {
22 // ResourceAddr is the address of the resource being created or updated
23 CreateAddr() *ResourceAddress
24}
25
26// DestroyEdgeTransformer is a GraphTransformer that creates the proper
27// references for destroy resources. Destroy resources are more complex
28// in that they must depend on the destruction of resources that
29// in turn depend on the CREATION of the node being destroyed.
30//
31// That is complicated. Visually:
32//
33// B_d -> A_d -> A -> B
34//
35// Notice that A destroy depends on B destroy, while B create depends on
36// A create. They're inverted. This must be done for example because often
37// dependent resources will block parent resources from deleting. Concrete
38// example: VPC with subnets, the VPC can't be deleted while there are
39// still subnets.
40type DestroyEdgeTransformer struct {
41 // These are needed to properly build the graph of dependencies
42 // to determine what a destroy node depends on. Any of these can be nil.
43 Module *module.Tree
44 State *State
45}
46
47func (t *DestroyEdgeTransformer) Transform(g *Graph) error {
48 log.Printf("[TRACE] DestroyEdgeTransformer: Beginning destroy edge transformation...")
49
50 // Build a map of what is being destroyed (by address string) to
51 // the list of destroyers. In general there will only be one destroyer
52 // but to make it more robust we support multiple.
53 destroyers := make(map[string][]GraphNodeDestroyer)
54 for _, v := range g.Vertices() {
55 dn, ok := v.(GraphNodeDestroyer)
56 if !ok {
57 continue
58 }
59
60 addr := dn.DestroyAddr()
61 if addr == nil {
62 continue
63 }
64
65 key := addr.String()
66 log.Printf(
67 "[TRACE] DestroyEdgeTransformer: %s destroying %q",
68 dag.VertexName(dn), key)
69 destroyers[key] = append(destroyers[key], dn)
70 }
71
72 // If we aren't destroying anything, there will be no edges to make
73 // so just exit early and avoid future work.
74 if len(destroyers) == 0 {
75 return nil
76 }
77
78 // Go through and connect creators to destroyers. Going along with
79 // our example, this makes: A_d => A
80 for _, v := range g.Vertices() {
81 cn, ok := v.(GraphNodeCreator)
82 if !ok {
83 continue
84 }
85
86 addr := cn.CreateAddr()
87 if addr == nil {
88 continue
89 }
90
91 key := addr.String()
92 ds := destroyers[key]
93 if len(ds) == 0 {
94 continue
95 }
96
97 for _, d := range ds {
98 // For illustrating our example
99 a_d := d.(dag.Vertex)
100 a := v
101
102 log.Printf(
103 "[TRACE] DestroyEdgeTransformer: connecting creator/destroyer: %s, %s",
104 dag.VertexName(a), dag.VertexName(a_d))
105
106 g.Connect(&DestroyEdge{S: a, T: a_d})
107 }
108 }
109
110 // This is strange but is the easiest way to get the dependencies
111 // of a node that is being destroyed. We use another graph to make sure
112 // the resource is in the graph and ask for references. We have to do this
113 // because the node that is being destroyed may NOT be in the graph.
114 //
115 // Example: resource A is force new, then destroy A AND create A are
116 // in the graph. BUT if resource A is just pure destroy, then only
117 // destroy A is in the graph, and create A is not.
118 providerFn := func(a *NodeAbstractProvider) dag.Vertex {
119 return &NodeApplyableProvider{NodeAbstractProvider: a}
120 }
121 steps := []GraphTransformer{
122 // Add outputs and metadata
123 &OutputTransformer{Module: t.Module},
124 &AttachResourceConfigTransformer{Module: t.Module},
125 &AttachStateTransformer{State: t.State},
126
127 // Add providers since they can affect destroy order as well
128 &MissingProviderTransformer{AllowAny: true, Concrete: providerFn},
129 &ProviderTransformer{},
130 &DisableProviderTransformer{},
131 &ParentProviderTransformer{},
132 &AttachProviderConfigTransformer{Module: t.Module},
133
134 // Add all the variables. We can depend on resources through
135 // variables due to module parameters, and we need to properly
136 // determine that.
137 &RootVariableTransformer{Module: t.Module},
138 &ModuleVariableTransformer{Module: t.Module},
139
140 &ReferenceTransformer{},
141 }
142
143 // Go through all the nodes being destroyed and create a graph.
144 // The resulting graph is only of things being CREATED. For example,
145 // following our example, the resulting graph would be:
146 //
147 // A, B (with no edges)
148 //
149 var tempG Graph
150 var tempDestroyed []dag.Vertex
151 for d := range destroyers {
152 // d is the address of what is being destroyed. We parse it back
153 // into a resource address; it is a panic (a bug) if this fails.
154 addr, err := ParseResourceAddress(d)
155 if err != nil {
156 panic(err)
157 }
158
159 // This part is a little bit weird but is the best way to
160		// find the dependencies we need: build a graph and use the
161 // attach config and state transformers then ask for references.
162 abstract := &NodeAbstractResource{Addr: addr}
163 tempG.Add(abstract)
164 tempDestroyed = append(tempDestroyed, abstract)
165
166 // We also add the destroy version here since the destroy can
167 // depend on things that the creation doesn't (destroy provisioners).
168 destroy := &NodeDestroyResource{NodeAbstractResource: abstract}
169 tempG.Add(destroy)
170 tempDestroyed = append(tempDestroyed, destroy)
171 }
172
173 // Run the graph transforms so we have the information we need to
174 // build references.
175 for _, s := range steps {
176 if err := s.Transform(&tempG); err != nil {
177 return err
178 }
179 }
180
181 log.Printf("[TRACE] DestroyEdgeTransformer: reference graph: %s", tempG.String())
182
183 // Go through all the nodes in the graph and determine what they
184 // depend on.
185 for _, v := range tempDestroyed {
186 // Find all ancestors of this to determine the edges we'll depend on
187 vs, err := tempG.Ancestors(v)
188 if err != nil {
189 return err
190 }
191
192 refs := make([]dag.Vertex, 0, vs.Len())
193 for _, raw := range vs.List() {
194 refs = append(refs, raw.(dag.Vertex))
195 }
196
197 refNames := make([]string, len(refs))
198 for i, ref := range refs {
199 refNames[i] = dag.VertexName(ref)
200 }
201 log.Printf(
202 "[TRACE] DestroyEdgeTransformer: creation node %q references %s",
203 dag.VertexName(v), refNames)
204
205 // If we have no references, then we won't need to do anything
206 if len(refs) == 0 {
207 continue
208 }
209
210 // Get the destroy node for this. In the example of our struct,
211 // we are currently at B and we're looking for B_d.
212 rn, ok := v.(GraphNodeResource)
213 if !ok {
214 continue
215 }
216
217 addr := rn.ResourceAddr()
218 if addr == nil {
219 continue
220 }
221
222 dns := destroyers[addr.String()]
223
224 // We have dependencies, check if any are being destroyed
225 // to build the list of things that we must depend on!
226 //
227 // In the example of the struct, if we have:
228 //
229 // B_d => A_d => A => B
230 //
231 // Then at this point in the algorithm we started with B_d,
232 // we built B (to get dependencies), and we found A. We're now looking
233 // to see if A_d exists.
234 var depDestroyers []dag.Vertex
235 for _, v := range refs {
236 rn, ok := v.(GraphNodeResource)
237 if !ok {
238 continue
239 }
240
241 addr := rn.ResourceAddr()
242 if addr == nil {
243 continue
244 }
245
246 key := addr.String()
247 if ds, ok := destroyers[key]; ok {
248 for _, d := range ds {
249 depDestroyers = append(depDestroyers, d.(dag.Vertex))
250 log.Printf(
251 "[TRACE] DestroyEdgeTransformer: destruction of %q depends on %s",
252 key, dag.VertexName(d))
253 }
254 }
255 }
256
257 // Go through and make the connections. Use the variable
258 // names "a_d" and "b_d" to reference our example.
259 for _, a_d := range dns {
260 for _, b_d := range depDestroyers {
261 if b_d != a_d {
262 g.Connect(dag.BasicEdge(b_d, a_d))
263 }
264 }
265 }
266 }
267
268 return nil
269}
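The ordering rule implemented above can be summarized compactly: if creating B depends on A, then destroying A must wait until B has been destroyed (B_d => A_d). A minimal, standard-library-only sketch of that reversal; the names and the creationDeps map are illustrative, not part of the vendored code:

package main

import "fmt"

func main() {
	// creation-time dependencies: each key depends on the listed values
	creationDeps := map[string][]string{"B": {"A"}}

	// destroy-time edges run in the opposite direction: B_d => A_d
	for to, froms := range creationDeps {
		for _, from := range froms {
			fmt.Printf("destroy %s_d before %s_d\n", to, from)
		}
	}
}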
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_diff.go b/vendor/github.com/hashicorp/terraform/terraform/transform_diff.go
new file mode 100644
index 0000000..ad46d3c
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_diff.go
@@ -0,0 +1,86 @@
1package terraform
2
3import (
4 "fmt"
5 "log"
6
7 "github.com/hashicorp/terraform/config/module"
8 "github.com/hashicorp/terraform/dag"
9)
10
11// DiffTransformer is a GraphTransformer that adds the elements of
12// the diff to the graph.
13//
14// This transform is used for example by the ApplyGraphBuilder to ensure
15// that only resources that are being modified are represented in the graph.
16//
17// Module and State are still required by the DiffTransformer for annotations
18// since the Diff doesn't contain all the information required to build the
19// complete graph (such as create-before-destroy information). The graph
20// is built based on the diff first, though, ensuring that only resources
21// that are being modified are present in the graph.
22type DiffTransformer struct {
23 Concrete ConcreteResourceNodeFunc
24
25 Diff *Diff
26 Module *module.Tree
27 State *State
28}
29
30func (t *DiffTransformer) Transform(g *Graph) error {
31 // If the diff is nil or empty (nil is empty) then do nothing
32 if t.Diff.Empty() {
33 return nil
34 }
35
36 // Go through all the modules in the diff.
37 log.Printf("[TRACE] DiffTransformer: starting")
38 var nodes []dag.Vertex
39 for _, m := range t.Diff.Modules {
40 log.Printf("[TRACE] DiffTransformer: Module: %s", m)
41 // TODO: If this is a destroy diff then add a module destroy node
42
43 // Go through all the resources in this module.
44 for name, inst := range m.Resources {
45 log.Printf("[TRACE] DiffTransformer: Resource %q: %#v", name, inst)
46
47 // We have changes! This is a create or update operation.
48 // First grab the address so we have a unique way to
49 // reference this resource.
50 addr, err := parseResourceAddressInternal(name)
51 if err != nil {
52 panic(fmt.Sprintf(
53 "Error parsing internal name, this is a bug: %q", name))
54 }
55
56 // Very important: add the module path for this resource to
57 // the address. Remove "root" from it.
58 addr.Path = m.Path[1:]
59
60 // If we're destroying, add the destroy node
61 if inst.Destroy || inst.GetDestroyDeposed() {
62 abstract := &NodeAbstractResource{Addr: addr}
63 g.Add(&NodeDestroyResource{NodeAbstractResource: abstract})
64 }
65
66 // If we have changes, then add the applyable version
67 if len(inst.Attributes) > 0 {
68 // Add the resource to the graph
69 abstract := &NodeAbstractResource{Addr: addr}
70 var node dag.Vertex = abstract
71 if f := t.Concrete; f != nil {
72 node = f(abstract)
73 }
74
75 nodes = append(nodes, node)
76 }
77 }
78 }
79
80 // Add all the nodes to the graph
81 for _, n := range nodes {
82 g.Add(n)
83 }
84
85 return nil
86}
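The per-instance branching in the transform above reduces to two independent checks: a destroy node when the diff marks the instance destroyed (or deposed), and an apply node when attribute changes exist; a replacement triggers both. A self-contained sketch, using an illustrative stand-in for the real *InstanceDiff type:

package main

import "fmt"

// instanceDiff is an illustrative stand-in for the real *InstanceDiff,
// carrying only the fields the transform inspects.
type instanceDiff struct {
	Destroy        bool
	DestroyDeposed bool
	Attributes     map[string]string
}

func main() {
	diffs := map[string]instanceDiff{
		"aws_instance.gone":    {Destroy: true},
		"aws_instance.changed": {Attributes: map[string]string{"ami": "ami-123"}},
		"aws_instance.replace": {Destroy: true, Attributes: map[string]string{"ami": "ami-456"}},
	}
	for name, d := range diffs {
		if d.Destroy || d.DestroyDeposed {
			fmt.Printf("%s: add a destroy node\n", name)
		}
		if len(d.Attributes) > 0 {
			fmt.Printf("%s: add an apply node\n", name)
		}
	}
}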
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_expand.go b/vendor/github.com/hashicorp/terraform/terraform/transform_expand.go
new file mode 100644
index 0000000..982c098
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_expand.go
@@ -0,0 +1,48 @@
1package terraform
2
3import (
4 "log"
5
6 "github.com/hashicorp/terraform/dag"
7)
8
9// GraphNodeExpandable is an interface that nodes can implement to
10// signal that they can be expanded. Expanded nodes turn into
11// GraphNodeSubgraph nodes within the graph.
12type GraphNodeExpandable interface {
13 Expand(GraphBuilder) (GraphNodeSubgraph, error)
14}
15
16// GraphNodeDynamicExpandable is an interface that nodes can implement
17// to signal that they can be expanded at eval-time (hence dynamic).
18// These nodes are given the eval context and are expected to return
19// a new subgraph.
20type GraphNodeDynamicExpandable interface {
21 DynamicExpand(EvalContext) (*Graph, error)
22}
23
24// GraphNodeSubgraph is an interface a node can implement if it has
25// a larger subgraph that should be walked.
26type GraphNodeSubgraph interface {
27 Subgraph() dag.Grapher
28}
29
30// ExpandTransform is a transformer that does a subgraph expansion
31// at graph transform time (vs. at eval time). The benefit of earlier
32// subgraph expansion is that errors with the graph build can be detected
33// at an earlier stage.
34type ExpandTransform struct {
35 Builder GraphBuilder
36}
37
38func (t *ExpandTransform) Transform(v dag.Vertex) (dag.Vertex, error) {
39 ev, ok := v.(GraphNodeExpandable)
40 if !ok {
41 // This isn't an expandable vertex, so just ignore it.
42 return v, nil
43 }
44
45 // Expand the subgraph!
46 log.Printf("[DEBUG] vertex %q: static expanding", dag.VertexName(ev))
47 return ev.Expand(t.Builder)
48}
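The transform above relies on Go's optional-interface pattern: probe each vertex with a type assertion and act only on matches. A runnable sketch of that pattern with hypothetical node types:

package main

import "fmt"

// expandable mirrors the optional-interface probe used by ExpandTransform;
// both node types below are hypothetical.
type expandable interface{ Expand() string }

type plainNode struct{}

type moduleNode struct{ name string }

func (m moduleNode) Expand() string { return "subgraph of " + m.name }

func main() {
	vertices := []interface{}{plainNode{}, moduleNode{name: "module.foo"}}
	for _, v := range vertices {
		if ev, ok := v.(expandable); ok {
			fmt.Println(ev.Expand())
			continue
		}
		fmt.Println("not expandable, kept as-is")
	}
}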
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_import_provider.go b/vendor/github.com/hashicorp/terraform/terraform/transform_import_provider.go
new file mode 100644
index 0000000..3673771
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_import_provider.go
@@ -0,0 +1,38 @@
1package terraform
2
3import (
4 "fmt"
5 "strings"
6)
7
8// ImportProviderValidateTransformer is a GraphTransformer that goes through
9// the providers in the graph and validates that they only depend on variables.
10type ImportProviderValidateTransformer struct{}
11
12func (t *ImportProviderValidateTransformer) Transform(g *Graph) error {
13 for _, v := range g.Vertices() {
14 // We only care about providers
15 pv, ok := v.(GraphNodeProvider)
16 if !ok {
17 continue
18 }
19
20 // We only care about providers that reference things
21 rn, ok := pv.(GraphNodeReferencer)
22 if !ok {
23 continue
24 }
25
26 for _, ref := range rn.References() {
27 if !strings.HasPrefix(ref, "var.") {
28 return fmt.Errorf(
29 "Provider %q depends on non-var %q. Providers for import can currently\n"+
30 "only depend on variables or must be hardcoded. You can stop import\n"+
31 "from loading configurations by specifying `-config=\"\"`.",
32 pv.ProviderName(), ref)
33 }
34 }
35 }
36
37 return nil
38}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_import_state.go b/vendor/github.com/hashicorp/terraform/terraform/transform_import_state.go
new file mode 100644
index 0000000..081df2f
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_import_state.go
@@ -0,0 +1,241 @@
1package terraform
2
3import (
4 "fmt"
5)
6
7// ImportStateTransformer is a GraphTransformer that adds nodes to the
8// graph to represent the imports we want to do for resources.
9type ImportStateTransformer struct {
10 Targets []*ImportTarget
11}
12
13func (t *ImportStateTransformer) Transform(g *Graph) error {
14 nodes := make([]*graphNodeImportState, 0, len(t.Targets))
15 for _, target := range t.Targets {
16 addr, err := ParseResourceAddress(target.Addr)
17 if err != nil {
18 return fmt.Errorf(
19 "failed to parse resource address '%s': %s",
20 target.Addr, err)
21 }
22
23 nodes = append(nodes, &graphNodeImportState{
24 Addr: addr,
25 ID: target.ID,
26 Provider: target.Provider,
27 })
28 }
29
30 // Build the graph vertices
31 for _, n := range nodes {
32 g.Add(n)
33 }
34
35 return nil
36}
37
38type graphNodeImportState struct {
39 Addr *ResourceAddress // Addr is the resource address to import to
40 ID string // ID is the ID to import as
41 Provider string // Provider string
42
43 states []*InstanceState
44}
45
46func (n *graphNodeImportState) Name() string {
47 return fmt.Sprintf("%s (import id: %s)", n.Addr, n.ID)
48}
49
50func (n *graphNodeImportState) ProvidedBy() []string {
51 return []string{resourceProvider(n.Addr.Type, n.Provider)}
52}
53
54// GraphNodeSubPath
55func (n *graphNodeImportState) Path() []string {
56 return normalizeModulePath(n.Addr.Path)
57}
58
59// GraphNodeEvalable impl.
60func (n *graphNodeImportState) EvalTree() EvalNode {
61 var provider ResourceProvider
62 info := &InstanceInfo{
63 Id: fmt.Sprintf("%s.%s", n.Addr.Type, n.Addr.Name),
64 ModulePath: n.Path(),
65 Type: n.Addr.Type,
66 }
67
68 // Reset our states
69 n.states = nil
70
71 // Return our sequence
72 return &EvalSequence{
73 Nodes: []EvalNode{
74 &EvalGetProvider{
75 Name: n.ProvidedBy()[0],
76 Output: &provider,
77 },
78 &EvalImportState{
79 Provider: &provider,
80 Info: info,
81 Id: n.ID,
82 Output: &n.states,
83 },
84 },
85 }
86}
87
88// GraphNodeDynamicExpandable impl.
89//
90// We use DynamicExpand as a way to generate the subgraph of refreshes
91 // and state inserts we need to do for our import state. Since they're new
92 // resources, they don't depend on anything else, and refreshes are isolated,
93 // so this is nearly a perfect use case for dynamic expand.
94func (n *graphNodeImportState) DynamicExpand(ctx EvalContext) (*Graph, error) {
95 g := &Graph{Path: ctx.Path()}
96
97 // nameCounter is used to de-dup names in the state.
98 nameCounter := make(map[string]int)
99
100 // Compile the list of addresses that we'll be inserting into the state.
101 // We do this ahead of time so we can verify that we aren't importing
102 // something that already exists.
103 addrs := make([]*ResourceAddress, len(n.states))
104 for i, state := range n.states {
105 addr := *n.Addr
106 if t := state.Ephemeral.Type; t != "" {
107 addr.Type = t
108 }
109
110 // Determine if we need to suffix the name to de-dup
111 key := addr.String()
112 count, ok := nameCounter[key]
113 if ok {
114 count++
115 addr.Name += fmt.Sprintf("-%d", count)
116 }
117 nameCounter[key] = count
118
119 // Add it to our list
120 addrs[i] = &addr
121 }
122
123 // Verify that all the addresses are clear
124 state, lock := ctx.State()
125 lock.RLock()
126 defer lock.RUnlock()
127 filter := &StateFilter{State: state}
128 for _, addr := range addrs {
129 result, err := filter.Filter(addr.String())
130 if err != nil {
131 return nil, fmt.Errorf("Error verifying address %s: %s", addr, err)
132 }
133
134		// Go through the filter results; it is an error if we find a
135		// matching InstanceState, since that means we would have a collision.
136 for _, r := range result {
137 if _, ok := r.Value.(*InstanceState); ok {
138 return nil, fmt.Errorf(
139 "Can't import %s, would collide with an existing resource.\n\n"+
140 "Please remove or rename this resource before continuing.",
141 addr)
142 }
143 }
144 }
145
146 // For each of the states, we add a node to handle the refresh/add to state.
147 // "n.states" is populated by our own EvalTree with the result of
148 // ImportState. Since DynamicExpand is always called after EvalTree, this
149 // is safe.
150 for i, state := range n.states {
151 g.Add(&graphNodeImportStateSub{
152 Target: addrs[i],
153 Path_: n.Path(),
154 State: state,
155 Provider: n.Provider,
156 })
157 }
158
159 // Root transform for a single root
160 t := &RootTransformer{}
161 if err := t.Transform(g); err != nil {
162 return nil, err
163 }
164
165 // Done!
166 return g, nil
167}
168
169// graphNodeImportStateSub is the sub-node of graphNodeImportState
170// and is part of the subgraph. This node is responsible for refreshing
171// and adding a resource to the state once it is imported.
172type graphNodeImportStateSub struct {
173 Target *ResourceAddress
174 State *InstanceState
175 Path_ []string
176 Provider string
177}
178
179func (n *graphNodeImportStateSub) Name() string {
180 return fmt.Sprintf("import %s result: %s", n.Target, n.State.ID)
181}
182
183func (n *graphNodeImportStateSub) Path() []string {
184 return n.Path_
185}
186
187// GraphNodeEvalable impl.
188func (n *graphNodeImportStateSub) EvalTree() EvalNode {
189 // If the Ephemeral type isn't set, then it is an error
190 if n.State.Ephemeral.Type == "" {
191 err := fmt.Errorf(
192 "import of %s didn't set type for %s",
193 n.Target.String(), n.State.ID)
194 return &EvalReturnError{Error: &err}
195 }
196
197 // DeepCopy so we're only modifying our local copy
198 state := n.State.DeepCopy()
199
200 // Build the resource info
201 info := &InstanceInfo{
202 Id: fmt.Sprintf("%s.%s", n.Target.Type, n.Target.Name),
203 ModulePath: n.Path_,
204 Type: n.State.Ephemeral.Type,
205 }
206
207 // Key is the resource key
208 key := &ResourceStateKey{
209 Name: n.Target.Name,
210 Type: info.Type,
211 Index: n.Target.Index,
212 }
213
214 // The eval sequence
215 var provider ResourceProvider
216 return &EvalSequence{
217 Nodes: []EvalNode{
218 &EvalGetProvider{
219 Name: resourceProvider(info.Type, n.Provider),
220 Output: &provider,
221 },
222 &EvalRefresh{
223 Provider: &provider,
224 State: &state,
225 Info: info,
226 Output: &state,
227 },
228 &EvalImportStateVerify{
229 Info: info,
230 Id: n.State.ID,
231 State: &state,
232 },
233 &EvalWriteState{
234 Name: key.String(),
235 ResourceType: info.Type,
236 Provider: resourceProvider(info.Type, n.Provider),
237 State: &state,
238 },
239 },
240 }
241}
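The nameCounter scheme in DynamicExpand is worth seeing in isolation: the first import of an address keeps its name, and each subsequent one gets a "-N" suffix. A standalone sketch (the address is illustrative):

package main

import "fmt"

func main() {
	// nameCounter de-dups imported addresses exactly as the loop above does:
	// the first occurrence keeps its name, later ones get a "-N" suffix.
	nameCounter := make(map[string]int)
	for i := 0; i < 3; i++ {
		name := "aws_instance.web" // illustrative address
		count, ok := nameCounter[name]
		if ok {
			count++
			name += fmt.Sprintf("-%d", count)
		}
		nameCounter["aws_instance.web"] = count
		fmt.Println(name) // aws_instance.web, aws_instance.web-1, aws_instance.web-2
	}
}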
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_module_variable.go b/vendor/github.com/hashicorp/terraform/terraform/transform_module_variable.go
new file mode 100644
index 0000000..467950b
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_module_variable.go
@@ -0,0 +1,120 @@
1package terraform
2
3import (
4 "log"
5
6 "github.com/hashicorp/terraform/config"
7 "github.com/hashicorp/terraform/config/module"
8 "github.com/hashicorp/terraform/dag"
9)
10
11// ModuleVariableTransformer is a GraphTransformer that adds all the variables
12// in the configuration to the graph.
13//
14// This only adds variables that are referenced by other things in the graph.
15// If a module variable is not referenced, it won't be added to the graph.
16type ModuleVariableTransformer struct {
17 Module *module.Tree
18
19	DisablePrune bool // True if pruning unreferenced variables should be disabled
20}
21
22func (t *ModuleVariableTransformer) Transform(g *Graph) error {
23 return t.transform(g, nil, t.Module)
24}
25
26func (t *ModuleVariableTransformer) transform(g *Graph, parent, m *module.Tree) error {
27 // If no config, no variables
28 if m == nil {
29 return nil
30 }
31
32	// Transform all the children. This must be done BEFORE we transform
33	// this module, since child module variables can reference parent module variables.
34 for _, c := range m.Children() {
35 if err := t.transform(g, m, c); err != nil {
36 return err
37 }
38 }
39
40 // If we have a parent, we can determine if a module variable is being
41 // used, so we transform this.
42 if parent != nil {
43 if err := t.transformSingle(g, parent, m); err != nil {
44 return err
45 }
46 }
47
48 return nil
49}
50
51func (t *ModuleVariableTransformer) transformSingle(g *Graph, parent, m *module.Tree) error {
52 // If we have no vars, we're done!
53 vars := m.Config().Variables
54 if len(vars) == 0 {
55 log.Printf("[TRACE] Module %#v has no variables, skipping.", m.Path())
56 return nil
57 }
58
59 // Look for usage of this module
60 var mod *config.Module
61 for _, modUse := range parent.Config().Modules {
62 if modUse.Name == m.Name() {
63 mod = modUse
64 break
65 }
66 }
67 if mod == nil {
68 log.Printf("[INFO] Module %#v not used, not adding variables", m.Path())
69 return nil
70 }
71
72 // Build the reference map so we can determine if we're referencing things.
73 refMap := NewReferenceMap(g.Vertices())
74
75 // Add all variables here
76 for _, v := range vars {
77 // Determine the value of the variable. If it isn't in the
78 // configuration then it was never set and that's not a problem.
79 var value *config.RawConfig
80 if raw, ok := mod.RawConfig.Raw[v.Name]; ok {
81 var err error
82 value, err = config.NewRawConfig(map[string]interface{}{
83 v.Name: raw,
84 })
85 if err != nil {
86 // This shouldn't happen because it is already in
87 // a RawConfig above meaning it worked once before.
88 panic(err)
89 }
90 }
91
92 // Build the node.
93 //
94 // NOTE: For now this is just an "applyable" variable. As we build
95 // new graph builders for the other operations I suspect we'll
96 // find a way to parameterize this, require new transforms, etc.
97 node := &NodeApplyableModuleVariable{
98 PathValue: normalizeModulePath(m.Path()),
99 Config: v,
100 Value: value,
101 Module: t.Module,
102 }
103
104 if !t.DisablePrune {
105 // If the node is not referenced by anything, then we don't need
106 // to include it since it won't be used.
107 if matches := refMap.ReferencedBy(node); len(matches) == 0 {
108 log.Printf(
109 "[INFO] Not including %q in graph, nothing depends on it",
110 dag.VertexName(node))
111 continue
112 }
113 }
114
115 // Add it!
116 g.Add(node)
117 }
118
119 return nil
120}
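The pruning step above boils down to a membership check against the reference map: a variable node is only added when something else references it. A minimal stand-in for that check (the map contents are illustrative, not the real ReferenceMap):

package main

import "fmt"

func main() {
	// referencedBy maps a referenceable name to whatever uses it; the
	// entries are illustrative only.
	referencedBy := map[string][]string{
		"var.region": {"aws_instance.web"},
	}
	for _, name := range []string{"var.region", "var.unused"} {
		if len(referencedBy[name]) == 0 {
			fmt.Printf("not including %q in graph, nothing depends on it\n", name)
			continue
		}
		fmt.Printf("adding %q to the graph\n", name)
	}
}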
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_count.go b/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_count.go
new file mode 100644
index 0000000..b256a25
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_count.go
@@ -0,0 +1,110 @@
1package terraform
2
3import (
4 "log"
5
6 "github.com/hashicorp/terraform/dag"
7)
8
9// OrphanResourceCountTransformer is a GraphTransformer that adds orphans
10// for an expanded count to the graph. The determination of this depends
11// on the count argument given.
12//
13// Orphans are found by comparing the count to what is found in the state.
14// This transform assumes that if an element in the state is within the count
15// bounds given, it is not an orphan.
16type OrphanResourceCountTransformer struct {
17 Concrete ConcreteResourceNodeFunc
18
19 Count int // Actual count of the resource
20 Addr *ResourceAddress // Addr of the resource to look for orphans
21 State *State // Full global state
22}
23
24func (t *OrphanResourceCountTransformer) Transform(g *Graph) error {
25 log.Printf("[TRACE] OrphanResourceCount: Starting...")
26
27 // Grab the module in the state just for this resource address
28 ms := t.State.ModuleByPath(normalizeModulePath(t.Addr.Path))
29 if ms == nil {
30 // If no state, there can't be orphans
31 return nil
32 }
33
34 orphanIndex := -1
35 if t.Count == 1 {
36 orphanIndex = 0
37 }
38
39	// Go through the resources in the state and add any orphans to the graph
40	for key := range ms.Resources {
41 // Build the address
42 addr, err := parseResourceAddressInternal(key)
43 if err != nil {
44 return err
45 }
46 addr.Path = ms.Path[1:]
47
48 // Copy the address for comparison. If we aren't looking at
49 // the same resource, then just ignore it.
50 addrCopy := addr.Copy()
51 addrCopy.Index = -1
52 if !addrCopy.Equals(t.Addr) {
53 continue
54 }
55
56 log.Printf("[TRACE] OrphanResourceCount: Checking: %s", addr)
57
58 idx := addr.Index
59
60		// If the count is zero and the index here is -1 or 0, then we
61 // change the index to a high number so that we treat it as
62 // an orphan.
63 if t.Count <= 0 && idx <= 0 {
64 idx = t.Count + 1
65 }
66
67 // If we have a count greater than 0 and we're at the zero index,
68 // we do a special case check to see if our state also has a
69 // -1 index value. If so, this is an orphan because our rules are
70 // that if both a -1 and 0 are in the state, the 0 is destroyed.
71 if t.Count > 0 && idx == orphanIndex {
72			// This is a piece of cleverness (beware), but it's simple:
73 // if orphanIndex is 0, then check -1, else check 0.
74 checkIndex := (orphanIndex + 1) * -1
75
76 key := &ResourceStateKey{
77 Name: addr.Name,
78 Type: addr.Type,
79 Mode: addr.Mode,
80 Index: checkIndex,
81 }
82
83 if _, ok := ms.Resources[key.String()]; ok {
84 // We have a -1 index, too. Make an arbitrarily high
85 // index so that we always mark this as an orphan.
86 log.Printf(
87 "[WARN] OrphanResourceCount: %q both -1 and 0 index found, orphaning %d",
88 addr, orphanIndex)
89 idx = t.Count + 1
90 }
91 }
92
93 // If the index is within the count bounds, it is not an orphan
94 if idx < t.Count {
95 continue
96 }
97
98 // Build the abstract node and the concrete one
99 abstract := &NodeAbstractResource{Addr: addr}
100 var node dag.Vertex = abstract
101 if f := t.Concrete; f != nil {
102 node = f(abstract)
103 }
104
105 // Add it to the graph
106 g.Add(node)
107 }
108
109 return nil
110}
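The index arithmetic above is the subtle part of this transform. A self-contained sketch of the core rule, leaving aside the -1/0 collision case that the state lookup handles:

package main

import "fmt"

// orphaned applies the index rule from the transform above: with a count
// of zero the lone instance (index -1 or 0) is forced out of bounds, and
// anything at or beyond the configured count is an orphan.
func orphaned(count, idx int) bool {
	if count <= 0 && idx <= 0 {
		idx = count + 1
	}
	return idx >= count
}

func main() {
	fmt.Println(orphaned(0, -1)) // true: count 0 orphans everything
	fmt.Println(orphaned(3, 1))  // false: within bounds
	fmt.Println(orphaned(3, 4))  // true: beyond the new count
}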
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_output.go b/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_output.go
new file mode 100644
index 0000000..49568d5
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_output.go
@@ -0,0 +1,64 @@
1package terraform
2
3import (
4 "log"
5
6 "github.com/hashicorp/terraform/config"
7 "github.com/hashicorp/terraform/config/module"
8)
9
10// OrphanOutputTransformer finds the outputs that are present in the state
11// but no longer present in the given config, and adds them to the graph
12// for deletion.
13type OrphanOutputTransformer struct {
14 Module *module.Tree // Root module
15 State *State // State is the root state
16}
17
18func (t *OrphanOutputTransformer) Transform(g *Graph) error {
19 if t.State == nil {
20 log.Printf("[DEBUG] No state, no orphan outputs")
21 return nil
22 }
23
24 return t.transform(g, t.Module)
25}
26
27func (t *OrphanOutputTransformer) transform(g *Graph, m *module.Tree) error {
28 // Get our configuration, and recurse into children
29 var c *config.Config
30 if m != nil {
31 c = m.Config()
32 for _, child := range m.Children() {
33 if err := t.transform(g, child); err != nil {
34 return err
35 }
36 }
37 }
38
39 // Get the state. If there is no state, then we have no orphans!
40 path := normalizeModulePath(m.Path())
41 state := t.State.ModuleByPath(path)
42 if state == nil {
43 return nil
44 }
45
46 // Make a map of the valid outputs
47 valid := make(map[string]struct{})
48 for _, o := range c.Outputs {
49 valid[o.Name] = struct{}{}
50 }
51
52 // Go through the outputs and find the ones that aren't in our config.
53	for n := range state.Outputs {
54 // If it is in the valid map, then ignore
55 if _, ok := valid[n]; ok {
56 continue
57 }
58
59 // Orphan!
60 g.Add(&NodeOutputOrphan{OutputName: n, PathValue: path})
61 }
62
63 return nil
64}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_resource.go b/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_resource.go
new file mode 100644
index 0000000..e42d3c8
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_resource.go
@@ -0,0 +1,78 @@
1package terraform
2
3import (
4 "github.com/hashicorp/terraform/config"
5 "github.com/hashicorp/terraform/config/module"
6 "github.com/hashicorp/terraform/dag"
7)
8
9// OrphanResourceTransformer is a GraphTransformer that adds resource
10// orphans to the graph. A resource orphan is a resource that is
11// represented in the state but not in the configuration.
12//
13// This only adds orphans that have no representation at all in the
14// configuration.
15type OrphanResourceTransformer struct {
16 Concrete ConcreteResourceNodeFunc
17
18 // State is the global state. We require the global state to
19 // properly find module orphans at our path.
20 State *State
21
22 // Module is the root module. We'll look up the proper configuration
23 // using the graph path.
24 Module *module.Tree
25}
26
27func (t *OrphanResourceTransformer) Transform(g *Graph) error {
28 if t.State == nil {
29 // If the entire state is nil, there can't be any orphans
30 return nil
31 }
32
33 // Go through the modules and for each module transform in order
34 // to add the orphan.
35 for _, ms := range t.State.Modules {
36 if err := t.transform(g, ms); err != nil {
37 return err
38 }
39 }
40
41 return nil
42}
43
44func (t *OrphanResourceTransformer) transform(g *Graph, ms *ModuleState) error {
45 if ms == nil {
46 return nil
47 }
48
49 // Get the configuration for this path. The configuration might be
50 // nil if the module was removed from the configuration. This is okay,
51 // this just means that every resource is an orphan.
52 var c *config.Config
53 if m := t.Module.Child(ms.Path[1:]); m != nil {
54 c = m.Config()
55 }
56
57	// Go through the orphans and add them all to the graph
58 for _, key := range ms.Orphans(c) {
59 // Build the abstract resource
60 addr, err := parseResourceAddressInternal(key)
61 if err != nil {
62 return err
63 }
64 addr.Path = ms.Path[1:]
65
66 // Build the abstract node and the concrete one
67 abstract := &NodeAbstractResource{Addr: addr}
68 var node dag.Vertex = abstract
69 if f := t.Concrete; f != nil {
70 node = f(abstract)
71 }
72
73 // Add it to the graph
74 g.Add(node)
75 }
76
77 return nil
78}
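Conceptually, ms.Orphans(c) is a set difference: keys present in the state but absent from the configuration. A toy illustration with hypothetical addresses:

package main

import "fmt"

func main() {
	// An orphan is a resource in state with no config counterpart; the
	// set difference below mirrors what ms.Orphans(c) computes.
	inState := []string{"aws_instance.a", "aws_instance.b"}
	inConfig := map[string]struct{}{"aws_instance.a": {}}

	for _, key := range inState {
		if _, ok := inConfig[key]; !ok {
			fmt.Printf("orphan: %s\n", key) // aws_instance.b
		}
	}
}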
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_output.go b/vendor/github.com/hashicorp/terraform/terraform/transform_output.go
new file mode 100644
index 0000000..b260f4c
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_output.go
@@ -0,0 +1,59 @@
1package terraform
2
3import (
4 "github.com/hashicorp/terraform/config/module"
5)
6
7// OutputTransformer is a GraphTransformer that adds all the outputs
8// in the configuration to the graph.
9//
10// This is done for the apply graph builder even if dependent nodes
11// aren't changing, since there is no downside: the state will still be
12// available in that case.
13type OutputTransformer struct {
14 Module *module.Tree
15}
16
17func (t *OutputTransformer) Transform(g *Graph) error {
18 return t.transform(g, t.Module)
19}
20
21func (t *OutputTransformer) transform(g *Graph, m *module.Tree) error {
22 // If no config, no outputs
23 if m == nil {
24 return nil
25 }
26
27 // Transform all the children. We must do this first because
28 // we can reference module outputs and they must show up in the
29 // reference map.
30 for _, c := range m.Children() {
31 if err := t.transform(g, c); err != nil {
32 return err
33 }
34 }
35
36 // If we have no outputs, we're done!
37 os := m.Config().Outputs
38 if len(os) == 0 {
39 return nil
40 }
41
42 // Add all outputs here
43 for _, o := range os {
44 // Build the node.
45 //
46 // NOTE: For now this is just an "applyable" output. As we build
47 // new graph builders for the other operations I suspect we'll
48 // find a way to parameterize this, require new transforms, etc.
49 node := &NodeApplyableOutput{
50 PathValue: normalizeModulePath(m.Path()),
51 Config: o,
52 }
53
54 // Add it!
55 g.Add(node)
56 }
57
58 return nil
59}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_provider.go b/vendor/github.com/hashicorp/terraform/terraform/transform_provider.go
new file mode 100644
index 0000000..b9695d5
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_provider.go
@@ -0,0 +1,380 @@
1package terraform
2
3import (
4 "fmt"
5 "log"
6 "strings"
7
8 "github.com/hashicorp/go-multierror"
9 "github.com/hashicorp/terraform/dag"
10)
11
12// GraphNodeProvider is an interface that nodes that can be a provider
13// must implement. The ProviderName returned is the name of the provider
14// they satisfy.
15type GraphNodeProvider interface {
16 ProviderName() string
17}
18
19// GraphNodeCloseProvider is an interface that nodes that can be a close
20// provider must implement. The CloseProviderName returned is the name of
21// the provider they satisfy.
22type GraphNodeCloseProvider interface {
23 CloseProviderName() string
24}
25
26// GraphNodeProviderConsumer is an interface that nodes that require
27// a provider must implement. ProvidedBy must return the name of the provider
28// to use.
29type GraphNodeProviderConsumer interface {
30 ProvidedBy() []string
31}
32
33// ProviderTransformer is a GraphTransformer that maps resources to
34// providers within the graph. This will error if there are any resources
35 // that don't map to proper providers.
36type ProviderTransformer struct{}
37
38func (t *ProviderTransformer) Transform(g *Graph) error {
39 // Go through the other nodes and match them to providers they need
40 var err error
41 m := providerVertexMap(g)
42 for _, v := range g.Vertices() {
43 if pv, ok := v.(GraphNodeProviderConsumer); ok {
44 for _, p := range pv.ProvidedBy() {
45 target := m[providerMapKey(p, pv)]
46 if target == nil {
47				log.Printf("[TRACE] ProviderTransformer: provider %q not found in %#v", providerMapKey(p, pv), m)
48 err = multierror.Append(err, fmt.Errorf(
49 "%s: provider %s couldn't be found",
50 dag.VertexName(v), p))
51 continue
52 }
53
54 g.Connect(dag.BasicEdge(v, target))
55 }
56 }
57 }
58
59 return err
60}
61
62// CloseProviderTransformer is a GraphTransformer that adds nodes to the
63// graph that will close open provider connections that aren't needed anymore.
64 // A provider connection is no longer needed once all dependent resources
65 // in the graph are evaluated.
66type CloseProviderTransformer struct{}
67
68func (t *CloseProviderTransformer) Transform(g *Graph) error {
69 pm := providerVertexMap(g)
70 cpm := closeProviderVertexMap(g)
71 var err error
72 for _, v := range g.Vertices() {
73 if pv, ok := v.(GraphNodeProviderConsumer); ok {
74 for _, p := range pv.ProvidedBy() {
75 key := p
76 source := cpm[key]
77
78 if source == nil {
79 // Create a new graphNodeCloseProvider and add it to the graph
80 source = &graphNodeCloseProvider{ProviderNameValue: p}
81 g.Add(source)
82
83 // Close node needs to depend on provider
84 provider, ok := pm[key]
85 if !ok {
86 err = multierror.Append(err, fmt.Errorf(
87 "%s: provider %s couldn't be found for closing",
88 dag.VertexName(v), p))
89 continue
90 }
91 g.Connect(dag.BasicEdge(source, provider))
92
93 // Make sure we also add the new graphNodeCloseProvider to the map
94 // so we don't create and add any duplicate graphNodeCloseProviders.
95 cpm[key] = source
96 }
97
98 // Close node depends on all nodes provided by the provider
99 g.Connect(dag.BasicEdge(source, v))
100 }
101 }
102 }
103
104 return err
105}
106
107// MissingProviderTransformer is a GraphTransformer that adds nodes
108// for missing providers into the graph. Specifically, it creates provider
109// configuration nodes for all the providers that we support. These are
110// pruned later during an optimization pass.
111type MissingProviderTransformer struct {
112 // Providers is the list of providers we support.
113 Providers []string
114
115 // AllowAny will not check that a provider is supported before adding
116 // it to the graph.
117 AllowAny bool
118
119 // Concrete, if set, overrides how the providers are made.
120 Concrete ConcreteProviderNodeFunc
121}
122
123func (t *MissingProviderTransformer) Transform(g *Graph) error {
124 // Initialize factory
125 if t.Concrete == nil {
126 t.Concrete = func(a *NodeAbstractProvider) dag.Vertex {
127 return a
128 }
129 }
130
131 // Create a set of our supported providers
132 supported := make(map[string]struct{}, len(t.Providers))
133 for _, v := range t.Providers {
134 supported[v] = struct{}{}
135 }
136
137 // Get the map of providers we already have in our graph
138 m := providerVertexMap(g)
139
140 // Go through all the provider consumers and make sure we add
141 // that provider if it is missing. We use a for loop here instead
142	// of "range" since we'll append more entries to check as we go.
143 check := g.Vertices()
144 for i := 0; i < len(check); i++ {
145 v := check[i]
146
147 pv, ok := v.(GraphNodeProviderConsumer)
148 if !ok {
149 continue
150 }
151
152 // If this node has a subpath, then we use that as a prefix
153 // into our map to check for an existing provider.
154 var path []string
155 if sp, ok := pv.(GraphNodeSubPath); ok {
156 raw := normalizeModulePath(sp.Path())
157 if len(raw) > len(rootModulePath) {
158 path = raw
159 }
160 }
161
162 for _, p := range pv.ProvidedBy() {
163 key := providerMapKey(p, pv)
164 if _, ok := m[key]; ok {
165 // This provider already exists as a configure node
166 continue
167 }
168
169 // If the provider has an alias in it, we just want the type
170 ptype := p
171 if idx := strings.IndexRune(p, '.'); idx != -1 {
172 ptype = p[:idx]
173 }
174
175 if !t.AllowAny {
176 if _, ok := supported[ptype]; !ok {
177 // If we don't support the provider type, skip it.
178 // Validation later will catch this as an error.
179 continue
180 }
181 }
182
183 // Add the missing provider node to the graph
184 v := t.Concrete(&NodeAbstractProvider{
185 NameValue: p,
186 PathValue: path,
187 }).(dag.Vertex)
188 if len(path) > 0 {
189 // We'll need the parent provider as well, so let's
190 // add a dummy node to check to make sure that we add
191 // that parent provider.
192 check = append(check, &graphNodeProviderConsumerDummy{
193 ProviderValue: p,
194 PathValue: path[:len(path)-1],
195 })
196 }
197
198 m[key] = g.Add(v)
199 }
200 }
201
202 return nil
203}
204
205// ParentProviderTransformer connects provider nodes to their parents.
206//
207// This works by finding nodes that are both GraphNodeProviders and
208// GraphNodeSubPath. It then connects the providers to their parent
209// path.
210type ParentProviderTransformer struct{}
211
212func (t *ParentProviderTransformer) Transform(g *Graph) error {
213 // Make a mapping of path to dag.Vertex, where path is: "path.name"
214 m := make(map[string]dag.Vertex)
215
216 // Also create a map that maps a provider to its parent
217 parentMap := make(map[dag.Vertex]string)
218 for _, raw := range g.Vertices() {
219 // If it is the flat version, then make it the non-flat version.
220 // We eventually want to get rid of the flat version entirely so
221 // this is a stop-gap while it still exists.
222 var v dag.Vertex = raw
223
224 // Only care about providers
225 pn, ok := v.(GraphNodeProvider)
226 if !ok || pn.ProviderName() == "" {
227 continue
228 }
229
230 // Also require a subpath, if there is no subpath then we
231 // just totally ignore it. The expectation of this transform is
232 // that it is used with a graph builder that is already flattened.
233 var path []string
234 if pn, ok := raw.(GraphNodeSubPath); ok {
235 path = pn.Path()
236 }
237 path = normalizeModulePath(path)
238
239 // Build the key with path.name i.e. "child.subchild.aws"
240 key := fmt.Sprintf("%s.%s", strings.Join(path, "."), pn.ProviderName())
241 m[key] = raw
242
243 // Determine the parent if we're non-root. This is length 1 since
244 // the 0 index should be "root" since we normalize above.
245 if len(path) > 1 {
246 path = path[:len(path)-1]
247 key := fmt.Sprintf("%s.%s", strings.Join(path, "."), pn.ProviderName())
248 parentMap[raw] = key
249 }
250 }
251
252 // Connect!
253 for v, key := range parentMap {
254 if parent, ok := m[key]; ok {
255 g.Connect(dag.BasicEdge(v, parent))
256 }
257 }
258
259 return nil
260}
261
262// PruneProviderTransformer is a GraphTransformer that prunes all the
263// providers that aren't needed from the graph. A provider is unneeded if
264// no resource or module is using that provider.
265type PruneProviderTransformer struct{}
266
267func (t *PruneProviderTransformer) Transform(g *Graph) error {
268 for _, v := range g.Vertices() {
269 // We only care about the providers
270 if pn, ok := v.(GraphNodeProvider); !ok || pn.ProviderName() == "" {
271 continue
272 }
273 // Does anything depend on this? If not, then prune it.
274 if s := g.UpEdges(v); s.Len() == 0 {
275 if nv, ok := v.(dag.NamedVertex); ok {
276 log.Printf("[DEBUG] Pruning provider with no dependencies: %s", nv.Name())
277 }
278 g.Remove(v)
279 }
280 }
281
282 return nil
283}
284
285// providerMapKey is a helper that gives us the key to use for the
286// maps returned by things such as providerVertexMap.
287func providerMapKey(k string, v dag.Vertex) string {
288 pathPrefix := ""
289 if sp, ok := v.(GraphNodeSubPath); ok {
290 raw := normalizeModulePath(sp.Path())
291 if len(raw) > len(rootModulePath) {
292 pathPrefix = modulePrefixStr(raw) + "."
293 }
294 }
295
296 return pathPrefix + k
297}
298
299func providerVertexMap(g *Graph) map[string]dag.Vertex {
300 m := make(map[string]dag.Vertex)
301 for _, v := range g.Vertices() {
302 if pv, ok := v.(GraphNodeProvider); ok {
303 key := providerMapKey(pv.ProviderName(), v)
304 m[key] = v
305 }
306 }
307
308 return m
309}
310
311func closeProviderVertexMap(g *Graph) map[string]dag.Vertex {
312 m := make(map[string]dag.Vertex)
313 for _, v := range g.Vertices() {
314 if pv, ok := v.(GraphNodeCloseProvider); ok {
315 m[pv.CloseProviderName()] = v
316 }
317 }
318
319 return m
320}
321
322type graphNodeCloseProvider struct {
323 ProviderNameValue string
324}
325
326func (n *graphNodeCloseProvider) Name() string {
327 return fmt.Sprintf("provider.%s (close)", n.ProviderNameValue)
328}
329
330// GraphNodeEvalable impl.
331func (n *graphNodeCloseProvider) EvalTree() EvalNode {
332 return CloseProviderEvalTree(n.ProviderNameValue)
333}
334
335// GraphNodeDependable impl.
336func (n *graphNodeCloseProvider) DependableName() []string {
337 return []string{n.Name()}
338}
339
340func (n *graphNodeCloseProvider) CloseProviderName() string {
341 return n.ProviderNameValue
342}
343
344// GraphNodeDotter impl.
345func (n *graphNodeCloseProvider) DotNode(name string, opts *dag.DotOpts) *dag.DotNode {
346 if !opts.Verbose {
347 return nil
348 }
349 return &dag.DotNode{
350 Name: name,
351 Attrs: map[string]string{
352 "label": n.Name(),
353 "shape": "diamond",
354 },
355 }
356}
357
358// RemovableIfNotTargeted
359func (n *graphNodeCloseProvider) RemoveIfNotTargeted() bool {
360 // We need to add this so that this node will be removed if
361 // it isn't targeted or a dependency of a target.
362 return true
363}
364
365// graphNodeProviderConsumerDummy is a struct that never enters the real
366// graph (though it could to no ill effect). It implements
367 // GraphNodeProviderConsumer and GraphNodeSubPath as a way to force
368// certain transformations.
369type graphNodeProviderConsumerDummy struct {
370 ProviderValue string
371 PathValue []string
372}
373
374func (n *graphNodeProviderConsumerDummy) Path() []string {
375 return n.PathValue
376}
377
378func (n *graphNodeProviderConsumerDummy) ProvidedBy() []string {
379 return []string{n.ProviderValue}
380}
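providerMapKey and modulePrefixStr together namespace providers by module, so an "aws" provider in a child module does not collide with the root's. A standalone sketch of the key construction; it assumes the path has the implicit "root" element already stripped:

package main

import (
	"fmt"
	"strings"
)

// providerKey mirrors the providerMapKey/modulePrefixStr pairing above;
// path is the module path without the leading "root" element.
func providerKey(path []string, name string) string {
	parts := make([]string, 0, len(path)*2)
	for _, p := range path {
		parts = append(parts, "module", p)
	}
	if len(parts) == 0 {
		return name
	}
	return strings.Join(parts, ".") + "." + name
}

func main() {
	fmt.Println(providerKey(nil, "aws"))               // aws
	fmt.Println(providerKey([]string{"child"}, "aws")) // module.child.aws
}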
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_provider_disable.go b/vendor/github.com/hashicorp/terraform/terraform/transform_provider_disable.go
new file mode 100644
index 0000000..d9919f3
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_provider_disable.go
@@ -0,0 +1,50 @@
1package terraform
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/terraform/dag"
7)
8
9// DisableProviderTransformer "disables" any providers that are not actually
10// used by anything. This avoids the provider being initialized and configured.
11// This both saves resources and avoids errors, since configuration
12// may imply initialization which may require auth.
13type DisableProviderTransformer struct{}
14
15func (t *DisableProviderTransformer) Transform(g *Graph) error {
16 for _, v := range g.Vertices() {
17 // We only care about providers
18 pn, ok := v.(GraphNodeProvider)
19 if !ok || pn.ProviderName() == "" {
20 continue
21 }
22
23 // If we have dependencies, then don't disable
24 if g.UpEdges(v).Len() > 0 {
25 continue
26 }
27
28 // Get the path
29 var path []string
30 if pn, ok := v.(GraphNodeSubPath); ok {
31 path = pn.Path()
32 }
33
34 // Disable the provider by replacing it with a "disabled" provider
35 disabled := &NodeDisabledProvider{
36 NodeAbstractProvider: &NodeAbstractProvider{
37 NameValue: pn.ProviderName(),
38 PathValue: path,
39 },
40 }
41
42 if !g.Replace(v, disabled) {
43 panic(fmt.Sprintf(
44 "vertex disappeared from under us: %s",
45 dag.VertexName(v)))
46 }
47 }
48
49 return nil
50}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_provisioner.go b/vendor/github.com/hashicorp/terraform/terraform/transform_provisioner.go
new file mode 100644
index 0000000..f49d824
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_provisioner.go
@@ -0,0 +1,206 @@
1package terraform
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/go-multierror"
7 "github.com/hashicorp/terraform/dag"
8)
9
10// GraphNodeProvisioner is an interface that nodes that can be a provisioner
11// must implement. The ProvisionerName returned is the name of the provisioner
12// they satisfy.
13type GraphNodeProvisioner interface {
14 ProvisionerName() string
15}
16
17// GraphNodeCloseProvisioner is an interface that nodes that can be a close
18// provisioner must implement. The CloseProvisionerName returned is the name
19// of the provisioner they satisfy.
20type GraphNodeCloseProvisioner interface {
21 CloseProvisionerName() string
22}
23
24// GraphNodeProvisionerConsumer is an interface that nodes that require
25// a provisioner must implement. ProvisionedBy must return the name of the
26// provisioner to use.
27type GraphNodeProvisionerConsumer interface {
28 ProvisionedBy() []string
29}
30
31// ProvisionerTransformer is a GraphTransformer that maps resources to
32// provisioners within the graph. This will error if there are any resources
33 // that don't map to proper provisioners.
34type ProvisionerTransformer struct{}
35
36func (t *ProvisionerTransformer) Transform(g *Graph) error {
37 // Go through the other nodes and match them to provisioners they need
38 var err error
39 m := provisionerVertexMap(g)
40 for _, v := range g.Vertices() {
41 if pv, ok := v.(GraphNodeProvisionerConsumer); ok {
42 for _, p := range pv.ProvisionedBy() {
43 key := provisionerMapKey(p, pv)
44 if m[key] == nil {
45 err = multierror.Append(err, fmt.Errorf(
46 "%s: provisioner %s couldn't be found",
47 dag.VertexName(v), p))
48 continue
49 }
50
51 g.Connect(dag.BasicEdge(v, m[key]))
52 }
53 }
54 }
55
56 return err
57}
58
59// MissingProvisionerTransformer is a GraphTransformer that adds nodes
60// for missing provisioners into the graph.
61type MissingProvisionerTransformer struct {
62 // Provisioners is the list of provisioners we support.
63 Provisioners []string
64}
65
66func (t *MissingProvisionerTransformer) Transform(g *Graph) error {
67 // Create a set of our supported provisioners
68 supported := make(map[string]struct{}, len(t.Provisioners))
69 for _, v := range t.Provisioners {
70 supported[v] = struct{}{}
71 }
72
73 // Get the map of provisioners we already have in our graph
74 m := provisionerVertexMap(g)
75
76 // Go through all the provisioner consumers and make sure we add
77 // that provisioner if it is missing.
78 for _, v := range g.Vertices() {
79 pv, ok := v.(GraphNodeProvisionerConsumer)
80 if !ok {
81 continue
82 }
83
84 // If this node has a subpath, then we use that as a prefix
85		// into our map to check for an existing provisioner.
86 var path []string
87 if sp, ok := pv.(GraphNodeSubPath); ok {
88 raw := normalizeModulePath(sp.Path())
89 if len(raw) > len(rootModulePath) {
90 path = raw
91 }
92 }
93
94 for _, p := range pv.ProvisionedBy() {
95 // Build the key for storing in the map
96 key := provisionerMapKey(p, pv)
97
98 if _, ok := m[key]; ok {
99 // This provisioner already exists as a configure node
100 continue
101 }
102
103 if _, ok := supported[p]; !ok {
104 // If we don't support the provisioner type, skip it.
105 // Validation later will catch this as an error.
106 continue
107 }
108
109 // Build the vertex
110 var newV dag.Vertex = &NodeProvisioner{
111 NameValue: p,
112 PathValue: path,
113 }
114
115 // Add the missing provisioner node to the graph
116 m[key] = g.Add(newV)
117 }
118 }
119
120 return nil
121}
122
123// CloseProvisionerTransformer is a GraphTransformer that adds nodes to the
124// graph that will close open provisioner connections that aren't needed
125 // anymore. A provisioner connection is no longer needed once all dependent
126 // resources in the graph are evaluated.
127type CloseProvisionerTransformer struct{}
128
129func (t *CloseProvisionerTransformer) Transform(g *Graph) error {
130 m := closeProvisionerVertexMap(g)
131 for _, v := range g.Vertices() {
132 if pv, ok := v.(GraphNodeProvisionerConsumer); ok {
133 for _, p := range pv.ProvisionedBy() {
134 source := m[p]
135
136 if source == nil {
137 // Create a new graphNodeCloseProvisioner and add it to the graph
138 source = &graphNodeCloseProvisioner{ProvisionerNameValue: p}
139 g.Add(source)
140
141 // Make sure we also add the new graphNodeCloseProvisioner to the map
142 // so we don't create and add any duplicate graphNodeCloseProvisioners.
143 m[p] = source
144 }
145
146 g.Connect(dag.BasicEdge(source, v))
147 }
148 }
149 }
150
151 return nil
152}
153
154// provisionerMapKey is a helper that gives us the key to use for the
155// maps returned by things such as provisionerVertexMap.
156func provisionerMapKey(k string, v dag.Vertex) string {
157 pathPrefix := ""
158 if sp, ok := v.(GraphNodeSubPath); ok {
159 raw := normalizeModulePath(sp.Path())
160 if len(raw) > len(rootModulePath) {
161 pathPrefix = modulePrefixStr(raw) + "."
162 }
163 }
164
165 return pathPrefix + k
166}
167
168func provisionerVertexMap(g *Graph) map[string]dag.Vertex {
169 m := make(map[string]dag.Vertex)
170 for _, v := range g.Vertices() {
171 if pv, ok := v.(GraphNodeProvisioner); ok {
172 key := provisionerMapKey(pv.ProvisionerName(), v)
173 m[key] = v
174 }
175 }
176
177 return m
178}
179
180func closeProvisionerVertexMap(g *Graph) map[string]dag.Vertex {
181 m := make(map[string]dag.Vertex)
182 for _, v := range g.Vertices() {
183 if pv, ok := v.(GraphNodeCloseProvisioner); ok {
184 m[pv.CloseProvisionerName()] = v
185 }
186 }
187
188 return m
189}
190
191type graphNodeCloseProvisioner struct {
192 ProvisionerNameValue string
193}
194
195func (n *graphNodeCloseProvisioner) Name() string {
196 return fmt.Sprintf("provisioner.%s (close)", n.ProvisionerNameValue)
197}
198
199// GraphNodeEvalable impl.
200func (n *graphNodeCloseProvisioner) EvalTree() EvalNode {
201 return &EvalCloseProvisioner{Name: n.ProvisionerNameValue}
202}
203
204func (n *graphNodeCloseProvisioner) CloseProvisionerName() string {
205 return n.ProvisionerNameValue
206}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_reference.go b/vendor/github.com/hashicorp/terraform/terraform/transform_reference.go
new file mode 100644
index 0000000..c545235
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_reference.go
@@ -0,0 +1,321 @@
1package terraform
2
3import (
4 "fmt"
5 "log"
6 "strings"
7
8 "github.com/hashicorp/terraform/config"
9 "github.com/hashicorp/terraform/dag"
10)
11
12// GraphNodeReferenceable must be implemented by any node that represents
13// a Terraform thing that can be referenced (resource, module, etc.).
14//
15// Even if the thing has no name, this should return an empty list. By
16// implementing this and returning a non-nil result, you say that this CAN
17// be referenced and other methods of referencing may still be possible (such
18// as by path!)
19type GraphNodeReferenceable interface {
20 // ReferenceableName is the name by which this can be referenced.
21 // This can be either just the type, or include the field. Example:
22 // "aws_instance.bar" or "aws_instance.bar.id".
23 ReferenceableName() []string
24}
25
26// GraphNodeReferencer must be implemented by nodes that reference other
27// Terraform items and therefore depend on them.
28type GraphNodeReferencer interface {
29 // References are the list of things that this node references. This
30 // can include fields or just the type, just like GraphNodeReferenceable
31 // above.
32 References() []string
33}
34
35// GraphNodeReferenceGlobal is an interface that can optionally be
36// implemented. If ReferenceGlobal returns true, then the References()
37// and ReferenceableName() must be _fully qualified_ with "module.foo.bar"
38// etc.
39//
40// This allows a node to reference and be referenced by a specific name
41// that may cross module boundaries. This can be very dangerous so use
42// this wisely.
43//
44// The primary use case for this is module boundaries (variables coming in).
45type GraphNodeReferenceGlobal interface {
46 // Set to true to signal that references and name are fully
47 // qualified. See the above docs for more information.
48 ReferenceGlobal() bool
49}
50
51// ReferenceTransformer is a GraphTransformer that connects all the
52// nodes that reference each other in order to form the proper ordering.
53type ReferenceTransformer struct{}
54
55func (t *ReferenceTransformer) Transform(g *Graph) error {
56 // Build a reference map so we can efficiently look up the references
57 vs := g.Vertices()
58 m := NewReferenceMap(vs)
59
60 // Find the things that reference things and connect them
61 for _, v := range vs {
62 parents, _ := m.References(v)
63 parentsDbg := make([]string, len(parents))
64 for i, v := range parents {
65 parentsDbg[i] = dag.VertexName(v)
66 }
67 log.Printf(
68 "[DEBUG] ReferenceTransformer: %q references: %v",
69 dag.VertexName(v), parentsDbg)
70
71 for _, parent := range parents {
72 g.Connect(dag.BasicEdge(v, parent))
73 }
74 }
75
76 return nil
77}
78
79// ReferenceMap is a structure that can be used to efficiently check
80// for references on a graph.
81type ReferenceMap struct {
82	// m is the mapping of referenceable name to list of vertices that
83 // implement that name. This is built on initialization.
84 references map[string][]dag.Vertex
85 referencedBy map[string][]dag.Vertex
86}
87
88// References returns the list of vertices that this vertex
89// references along with any missing references.
90func (m *ReferenceMap) References(v dag.Vertex) ([]dag.Vertex, []string) {
91 rn, ok := v.(GraphNodeReferencer)
92 if !ok {
93 return nil, nil
94 }
95
96 var matches []dag.Vertex
97 var missing []string
98 prefix := m.prefix(v)
99 for _, ns := range rn.References() {
100 found := false
101 for _, n := range strings.Split(ns, "/") {
102 n = prefix + n
103 parents, ok := m.references[n]
104 if !ok {
105 continue
106 }
107
108 // Mark that we found a match
109 found = true
110
111 // Make sure this isn't a self reference, which isn't included
112 selfRef := false
113 for _, p := range parents {
114 if p == v {
115 selfRef = true
116 break
117 }
118 }
119 if selfRef {
120 continue
121 }
122
123 matches = append(matches, parents...)
124 break
125 }
126
127 if !found {
128 missing = append(missing, ns)
129 }
130 }
131
132 return matches, missing
133}
134
135// ReferencedBy returns the list of vertices that reference the
136// vertex passed in.
137func (m *ReferenceMap) ReferencedBy(v dag.Vertex) []dag.Vertex {
138 rn, ok := v.(GraphNodeReferenceable)
139 if !ok {
140 return nil
141 }
142
143 var matches []dag.Vertex
144 prefix := m.prefix(v)
145 for _, n := range rn.ReferenceableName() {
146 n = prefix + n
147 children, ok := m.referencedBy[n]
148 if !ok {
149 continue
150 }
151
152 // Make sure this isn't a self reference, which isn't included
153 selfRef := false
154 for _, p := range children {
155 if p == v {
156 selfRef = true
157 break
158 }
159 }
160 if selfRef {
161 continue
162 }
163
164 matches = append(matches, children...)
165 }
166
167 return matches
168}
169
170func (m *ReferenceMap) prefix(v dag.Vertex) string {
171 // If the node is stating it is already fully qualified then
172 // we don't have to create the prefix!
173 if gn, ok := v.(GraphNodeReferenceGlobal); ok && gn.ReferenceGlobal() {
174 return ""
175 }
176
177 // Create the prefix based on the path
178 var prefix string
179 if pn, ok := v.(GraphNodeSubPath); ok {
180 if path := normalizeModulePath(pn.Path()); len(path) > 1 {
181 prefix = modulePrefixStr(path) + "."
182 }
183 }
184
185 return prefix
186}
187
188// NewReferenceMap is used to create a new reference map for the
189// given set of vertices.
190func NewReferenceMap(vs []dag.Vertex) *ReferenceMap {
191 var m ReferenceMap
192
193 // Build the lookup table
194 refMap := make(map[string][]dag.Vertex)
195 for _, v := range vs {
196 // We're only looking for referenceable nodes
197 rn, ok := v.(GraphNodeReferenceable)
198 if !ok {
199 continue
200 }
201
202 // Go through and cache them
203 prefix := m.prefix(v)
204 for _, n := range rn.ReferenceableName() {
205 n = prefix + n
206 refMap[n] = append(refMap[n], v)
207 }
208
209 // If there is a path, it is always referenceable by that. For
210 // example, if this is a referenceable thing at path []string{"foo"},
211 // then it can be referenced at "module.foo"
212 if pn, ok := v.(GraphNodeSubPath); ok {
213 for _, p := range ReferenceModulePath(pn.Path()) {
214 refMap[p] = append(refMap[p], v)
215 }
216 }
217 }
218
219 // Build the lookup table for referenced by
220 refByMap := make(map[string][]dag.Vertex)
221 for _, v := range vs {
222 // We're only looking for referenceable nodes
223 rn, ok := v.(GraphNodeReferencer)
224 if !ok {
225 continue
226 }
227
228 // Go through and cache them
229 prefix := m.prefix(v)
230 for _, n := range rn.References() {
231 n = prefix + n
232 refByMap[n] = append(refByMap[n], v)
233 }
234 }
235
236 m.references = refMap
237 m.referencedBy = refByMap
238 return &m
239}
240
241// ReferenceModulePath returns the reference name for a module path. The path "foo" would return
242// "module.foo". If this is a deeply nested module, it will be every parent
243// as well. For example: ["foo", "bar"] would return both "module.foo" and
244// "module.foo.module.bar"
245func ReferenceModulePath(p []string) []string {
246 p = normalizeModulePath(p)
247 if len(p) == 1 {
248 // Root, no name
249 return nil
250 }
251
252 result := make([]string, 0, len(p)-1)
253 for i := len(p); i > 1; i-- {
254 result = append(result, modulePrefixStr(p[:i]))
255 }
256
257 return result
258}
259
260// ReferencesFromConfig returns the references that a configuration has
261// based on the interpolated variables in a configuration.
262func ReferencesFromConfig(c *config.RawConfig) []string {
263 var result []string
264 for _, v := range c.Variables {
265 if r := ReferenceFromInterpolatedVar(v); len(r) > 0 {
266 result = append(result, r...)
267 }
268 }
269
270 return result
271}
272
273// ReferenceFromInterpolatedVar returns the reference from this variable,
274// or nil if there is no reference.
275func ReferenceFromInterpolatedVar(v config.InterpolatedVariable) []string {
276 switch v := v.(type) {
277 case *config.ModuleVariable:
278 return []string{fmt.Sprintf("module.%s.output.%s", v.Name, v.Field)}
279 case *config.ResourceVariable:
280 id := v.ResourceId()
281
282 // If we have a multi-reference (splat), then we depend on ALL
283 // resources with this type/name.
284 if v.Multi && v.Index == -1 {
285 return []string{fmt.Sprintf("%s.*", id)}
286 }
287
288 // Otherwise, we depend on a specific index.
289 idx := v.Index
290 if !v.Multi || v.Index == -1 {
291 idx = 0
292 }
293
294 // Depend on the index, as well as "N" which represents the
295 // un-expanded set of resources.
296 return []string{fmt.Sprintf("%s.%d/%s.N", id, idx, id)}
297 case *config.UserVariable:
298 return []string{fmt.Sprintf("var.%s", v.Name)}
299 default:
300 return nil
301 }
302}
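For illustration (commentary, not part of the vendored file): the reference strings built above, shown for a hypothetical `aws_instance.web` resource.

```go
package main

import "fmt"

func main() {
	// Reference strings as described in ReferenceFromInterpolatedVar;
	// "aws_instance.web" is a made-up resource ID.
	id := "aws_instance.web"

	fmt.Printf("%s.*\n", id)              // splat: depends on ALL instances
	fmt.Printf("%s.%d/%s.N\n", id, 0, id) // index 0 plus the un-expanded "N" form
	fmt.Println("var.region")             // a *config.UserVariable
	fmt.Println("module.vpc.output.id")   // a *config.ModuleVariable
}
```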
303
304func modulePrefixStr(p []string) string {
305 parts := make([]string, 0, len(p)*2)
306 for _, p := range p[1:] {
307 parts = append(parts, "module", p)
308 }
309
310 return strings.Join(parts, ".")
311}
312
313func modulePrefixList(result []string, prefix string) []string {
314 if prefix != "" {
315 for i, v := range result {
316 result[i] = fmt.Sprintf("%s.%s", prefix, v)
317 }
318 }
319
320 return result
321}
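A minimal sketch (not part of the vendored file) of the naming scheme implemented by ReferenceModulePath and modulePrefixStr, assuming normalizeModulePath yields a path whose first element is the implicit "root":

```go
package main

import (
	"fmt"
	"strings"
)

// modulePrefix mirrors modulePrefixStr: every element after the
// implicit "root" becomes a "module.<name>" segment.
func modulePrefix(path []string) string {
	parts := make([]string, 0, len(path)*2)
	for _, p := range path[1:] {
		parts = append(parts, "module", p)
	}
	return strings.Join(parts, ".")
}

func main() {
	// A node at ["root", "foo", "bar"] is referenceable through
	// every parent module, deepest first:
	path := []string{"root", "foo", "bar"}
	for i := len(path); i > 1; i-- {
		fmt.Println(modulePrefix(path[:i]))
	}
	// Output:
	// module.foo.module.bar
	// module.foo
}
```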
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_resource_count.go b/vendor/github.com/hashicorp/terraform/terraform/transform_resource_count.go
new file mode 100644
index 0000000..cda35cb
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_resource_count.go
@@ -0,0 +1,51 @@
1package terraform
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/terraform/dag"
7)
8
9// ResourceCountTransformer is a GraphTransformer that expands the count
10// out for a specific resource.
11//
12// This assumes that the count is already interpolated.
13type ResourceCountTransformer struct {
14 Concrete ConcreteResourceNodeFunc
15
16 Count int
17 Addr *ResourceAddress
18}
19
20func (t *ResourceCountTransformer) Transform(g *Graph) error {
21 // Don't allow the count to be negative
22 if t.Count < 0 {
23 return fmt.Errorf("negative count: %d", t.Count)
24 }
25
26 // For each count, build and add the node
27 for i := 0; i < t.Count; i++ {
28 // Set the index. If our count is 1 we special case it so that
29 // we handle the "resource.0" and "resource" boundary properly.
30 index := i
31 if t.Count == 1 {
32 index = -1
33 }
34
35 // Build the resource address
36 addr := t.Addr.Copy()
37 addr.Index = index
38
39 // Build the abstract node and the concrete one
40 abstract := &NodeAbstractResource{Addr: addr}
41 var node dag.Vertex = abstract
42 if f := t.Concrete; f != nil {
43 node = f(abstract)
44 }
45
46 // Add it to the graph
47 g.Add(node)
48 }
49
50 return nil
51}
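A small sketch of the index special-casing in the loop above (expandIndices is a hypothetical helper, not part of the vendored file):

```go
package main

import "fmt"

// expandIndices mirrors the loop in Transform: with a count of 1 the
// single instance gets index -1, so it is addressed as
// "aws_instance.web" rather than "aws_instance.web.0".
func expandIndices(count int) []int {
	indices := make([]int, count)
	for i := 0; i < count; i++ {
		indices[i] = i
		if count == 1 {
			indices[i] = -1
		}
	}
	return indices
}

func main() {
	fmt.Println(expandIndices(1)) // [-1]
	fmt.Println(expandIndices(3)) // [0 1 2]
}
```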
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_root.go b/vendor/github.com/hashicorp/terraform/terraform/transform_root.go
new file mode 100644
index 0000000..aee053d
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_root.go
@@ -0,0 +1,38 @@
1package terraform
2
3import "github.com/hashicorp/terraform/dag"
4
5const rootNodeName = "root"
6
7// RootTransformer is a GraphTransformer that adds a root to the graph.
8type RootTransformer struct{}
9
10func (t *RootTransformer) Transform(g *Graph) error {
11 // If we already have a good root, we're done
12 if _, err := g.Root(); err == nil {
13 return nil
14 }
15
16 // Add a root
17 var root graphNodeRoot
18 g.Add(root)
19
20 // Connect the root to all the edges that need it
21 for _, v := range g.Vertices() {
22 if v == root {
23 continue
24 }
25
26 if g.UpEdges(v).Len() == 0 {
27 g.Connect(dag.BasicEdge(root, v))
28 }
29 }
30
31 return nil
32}
33
34type graphNodeRoot struct{}
35
36func (n graphNodeRoot) Name() string {
37 return rootNodeName
38}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_state.go b/vendor/github.com/hashicorp/terraform/terraform/transform_state.go
new file mode 100644
index 0000000..471cd74
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_state.go
@@ -0,0 +1,65 @@
1package terraform
2
3import (
4 "fmt"
5 "log"
6
7 "github.com/hashicorp/terraform/dag"
8)
9
10// StateTransformer is a GraphTransformer that adds the elements of
11// the state to the graph.
12//
13// This transform is used for example by the DestroyPlanGraphBuilder to ensure
14// that only resources that are in the state are represented in the graph.
15type StateTransformer struct {
16 Concrete ConcreteResourceNodeFunc
17
18 State *State
19}
20
21func (t *StateTransformer) Transform(g *Graph) error {
22 // If the state is nil or empty (nil is empty) then do nothing
23 if t.State.Empty() {
24 return nil
25 }
26
27 // Go through all the modules in the diff.
28 log.Printf("[TRACE] StateTransformer: starting")
29 var nodes []dag.Vertex
30 for _, ms := range t.State.Modules {
31 log.Printf("[TRACE] StateTransformer: Module: %v", ms.Path)
32
33 // Go through all the resources in this module.
34 for name, rs := range ms.Resources {
35 log.Printf("[TRACE] StateTransformer: Resource %q: %#v", name, rs)
36
37 // Add the resource to the graph
38 addr, err := parseResourceAddressInternal(name)
39 if err != nil {
40 panic(fmt.Sprintf(
41 "Error parsing internal name, this is a bug: %q", name))
42 }
43
44 // Very important: add the module path for this resource to
45 // the address. Remove "root" from it.
46 addr.Path = ms.Path[1:]
47
48 // Add the resource to the graph
49 abstract := &NodeAbstractResource{Addr: addr}
50 var node dag.Vertex = abstract
51 if f := t.Concrete; f != nil {
52 node = f(abstract)
53 }
54
55 nodes = append(nodes, node)
56 }
57 }
58
59 // Add all the nodes to the graph
60 for _, n := range nodes {
61 g.Add(n)
62 }
63
64 return nil
65}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_targets.go b/vendor/github.com/hashicorp/terraform/terraform/transform_targets.go
new file mode 100644
index 0000000..225ac4b
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_targets.go
@@ -0,0 +1,144 @@
1package terraform
2
3import (
4 "log"
5
6 "github.com/hashicorp/terraform/dag"
7)
8
9// GraphNodeTargetable is an interface for graph nodes to implement when they
10// need to be told about incoming targets. This is useful for nodes that need
11// to respect targets as they dynamically expand. Note that the list of targets
12// provided will contain every target provided, and each implementing graph
13// node must filter this list to targets considered relevant.
14type GraphNodeTargetable interface {
15 SetTargets([]ResourceAddress)
16}
17
18// TargetsTransformer is a GraphTransformer that, when the user specifies a
19// list of resources to target, limits the graph to only those resources and
20// their dependencies.
21type TargetsTransformer struct {
22 // List of targeted resource names specified by the user
23 Targets []string
24
25 // List of parsed targets, provided by callers like ResourceCountTransformer
26 // that already have the targets parsed
27 ParsedTargets []ResourceAddress
28
29 // Set to true when we're in a `terraform destroy` or a
30 // `terraform plan -destroy`
31 Destroy bool
32}
33
34func (t *TargetsTransformer) Transform(g *Graph) error {
35 if len(t.Targets) > 0 && len(t.ParsedTargets) == 0 {
36 addrs, err := t.parseTargetAddresses()
37 if err != nil {
38 return err
39 }
40
41 t.ParsedTargets = addrs
42 }
43
44 if len(t.ParsedTargets) > 0 {
45 targetedNodes, err := t.selectTargetedNodes(g, t.ParsedTargets)
46 if err != nil {
47 return err
48 }
49
50 for _, v := range g.Vertices() {
51 removable := false
52 if _, ok := v.(GraphNodeResource); ok {
53 removable = true
54 }
55 if vr, ok := v.(RemovableIfNotTargeted); ok {
56 removable = vr.RemoveIfNotTargeted()
57 }
58 if removable && !targetedNodes.Include(v) {
59 log.Printf("[DEBUG] Removing %q, filtered by targeting.", dag.VertexName(v))
60 g.Remove(v)
61 }
62 }
63 }
64
65 return nil
66}
67
68func (t *TargetsTransformer) parseTargetAddresses() ([]ResourceAddress, error) {
69 addrs := make([]ResourceAddress, len(t.Targets))
70 for i, target := range t.Targets {
71 ta, err := ParseResourceAddress(target)
72 if err != nil {
73 return nil, err
74 }
75 addrs[i] = *ta
76 }
77
78 return addrs, nil
79}
80
81// Returns the list of targeted nodes. A targeted node is either addressed
82// directly, or is an Ancestor of a targeted node. Destroy mode keeps
83// Descendents instead of Ancestors.
84func (t *TargetsTransformer) selectTargetedNodes(
85 g *Graph, addrs []ResourceAddress) (*dag.Set, error) {
86 targetedNodes := new(dag.Set)
87 for _, v := range g.Vertices() {
88 if t.nodeIsTarget(v, addrs) {
89 targetedNodes.Add(v)
90
91 // We inform nodes that ask about the list of targets - helps for nodes
92 // that need to dynamically expand. Note that this only occurs for nodes
93 // that are already directly targeted.
94 if tn, ok := v.(GraphNodeTargetable); ok {
95 tn.SetTargets(addrs)
96 }
97
98 var deps *dag.Set
99 var err error
100 if t.Destroy {
101 deps, err = g.Descendents(v)
102 } else {
103 deps, err = g.Ancestors(v)
104 }
105 if err != nil {
106 return nil, err
107 }
108
109 for _, d := range deps.List() {
110 targetedNodes.Add(d)
111 }
112 }
113 }
114
115 return targetedNodes, nil
116}
117
118func (t *TargetsTransformer) nodeIsTarget(
119 v dag.Vertex, addrs []ResourceAddress) bool {
120 r, ok := v.(GraphNodeResource)
121 if !ok {
122 return false
123 }
124
125 addr := r.ResourceAddr()
126 for _, targetAddr := range addrs {
127 if targetAddr.Equals(addr) {
128 return true
129 }
130 }
131
132 return false
133}
134
135// RemovableIfNotTargeted is a special interface for graph nodes that
136// aren't directly addressable, but need to be removed from the graph when they
137// are not targeted. (Nodes that are not directly targeted end up in the set of
138// targeted nodes because something that _is_ targeted depends on them.) The
139// initial use case for this interface is GraphNodeConfigVariable, which was
140// having trouble interpolating for module variables in targeted scenarios that
141// filtered out the resource node being referenced.
142type RemovableIfNotTargeted interface {
143 RemoveIfNotTargeted() bool
144}
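A toy sketch of the selection rule above (not the real dag package; keep and the sample addresses are hypothetical): a target keeps itself plus everything reachable along dependency edges, which is the ancestor set in plan/apply mode and would be the descendant set (reversed edges) in destroy mode.

```go
package main

import "fmt"

// keep walks dependency edges from each target, collecting every
// node reachable from it.
func keep(deps map[string][]string, targets []string) map[string]bool {
	kept := make(map[string]bool)
	var visit func(n string)
	visit = func(n string) {
		if kept[n] {
			return
		}
		kept[n] = true
		for _, d := range deps[n] {
			visit(d)
		}
	}
	for _, t := range targets {
		visit(t)
	}
	return kept
}

func main() {
	deps := map[string][]string{
		"aws_instance.web": {"aws_security_group.fw"},
		"aws_eip.ip":       {"aws_instance.web"},
	}
	// Targeting the instance keeps it and its dependency, not the EIP.
	fmt.Println(keep(deps, []string{"aws_instance.web"}))
	// map[aws_instance.web:true aws_security_group.fw:true]
}
```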
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_transitive_reduction.go b/vendor/github.com/hashicorp/terraform/terraform/transform_transitive_reduction.go
new file mode 100644
index 0000000..2184278
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_transitive_reduction.go
@@ -0,0 +1,20 @@
1package terraform
2
3// TransitiveReductionTransformer is a GraphTransformer that finds
4// the transitive reduction of the graph. For a definition of
5// transitive reduction, see Wikipedia.
6type TransitiveReductionTransformer struct{}
7
8func (t *TransitiveReductionTransformer) Transform(g *Graph) error {
9 // If the graph isn't valid, skip the transitive reduction.
10 // We don't error here because Terraform itself handles graph
11 // validation in a better way, or we assume it does.
12 if err := g.Validate(); err != nil {
13 return nil
14 }
15
16 // Do it
17 g.TransitiveReduction()
18
19 return nil
20}
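For a concrete picture (toy example, not the dag implementation): transitive reduction removes any edge already implied by a longer path, so in the triangle A→B, B→C, A→C, the direct A→C edge is dropped.

```go
package main

import "fmt"

func main() {
	edges := [][2]string{{"A", "B"}, {"B", "C"}, {"A", "C"}}
	has := map[[2]string]bool{}
	for _, e := range edges {
		has[e] = true
	}

	// Keep an edge only if no intermediate vertex connects its
	// endpoints (a one-hop check suffices for this tiny graph).
	var reduced [][2]string
	for _, e := range edges {
		redundant := false
		for _, m := range []string{"A", "B", "C"} {
			if m != e[0] && m != e[1] && has[[2]string{e[0], m}] && has[[2]string{m, e[1]}] {
				redundant = true
			}
		}
		if !redundant {
			reduced = append(reduced, e)
		}
	}
	fmt.Println(reduced) // [[A B] [B C]]
}
```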
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_variable.go b/vendor/github.com/hashicorp/terraform/terraform/transform_variable.go
new file mode 100644
index 0000000..b31e2c7
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_variable.go
@@ -0,0 +1,40 @@
1package terraform
2
3import (
4 "github.com/hashicorp/terraform/config/module"
5)
6
7// RootVariableTransformer is a GraphTransformer that adds all the root
8// variables to the graph.
9//
10// Root variables are currently no-ops but they must be added to the
11// graph since downstream things that depend on them must be able to
12// reach them.
13type RootVariableTransformer struct {
14 Module *module.Tree
15}
16
17func (t *RootVariableTransformer) Transform(g *Graph) error {
18 // If no config, no variables
19 if t.Module == nil {
20 return nil
21 }
22
23 // If we have no vars, we're done!
24 vars := t.Module.Config().Variables
25 if len(vars) == 0 {
26 return nil
27 }
28
29 // Add all variables here
30 for _, v := range vars {
31 node := &NodeRootVariable{
32 Config: v,
33 }
34
35 // Add it!
36 g.Add(node)
37 }
38
39 return nil
40}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_vertex.go b/vendor/github.com/hashicorp/terraform/terraform/transform_vertex.go
new file mode 100644
index 0000000..6b1293f
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_vertex.go
@@ -0,0 +1,44 @@
1package terraform
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/terraform/dag"
7)
8
9// VertexTransformer is a GraphTransformer that transforms vertices
10// using the GraphVertexTransformers. The Transforms are run in sequential
11// order. If a transform replaces a vertex then the next transform will see
12// the new vertex.
13type VertexTransformer struct {
14 Transforms []GraphVertexTransformer
15}
16
17func (t *VertexTransformer) Transform(g *Graph) error {
18 for _, v := range g.Vertices() {
19 for _, vt := range t.Transforms {
20 newV, err := vt.Transform(v)
21 if err != nil {
22 return err
23 }
24
25 // If the vertex didn't change, then don't do anything more
26 if newV == v {
27 continue
28 }
29
30 // Vertex changed, replace it within the graph
31 if ok := g.Replace(v, newV); !ok {
32 // This should never happen, big problem
33 return fmt.Errorf(
34 "Failed to replace %s with %s!\n\nSource: %#v\n\nTarget: %#v",
35 dag.VertexName(v), dag.VertexName(newV), v, newV)
36 }
37
38 // Replace v so that future transforms use the proper vertex
39 v = newV
40 }
41 }
42
43 return nil
44}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/ui_input.go b/vendor/github.com/hashicorp/terraform/terraform/ui_input.go
new file mode 100644
index 0000000..7c87459
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/ui_input.go
@@ -0,0 +1,26 @@
1package terraform
2
3// UIInput is the interface that must be implemented to ask for input
4// from the user. This should forward the request to wherever the user
5// inputs things to ask for values.
6type UIInput interface {
7 Input(*InputOpts) (string, error)
8}
9
10// InputOpts are options for asking for input.
11type InputOpts struct {
12 // Id is a unique ID for the question being asked that might be
13 // used for logging or to look up a prior answered question.
14 Id string
15
16 // Query is a human-friendly question for inputting this value.
17 Query string
18
19 // Description is a description of what this option is. Be aware
20 // that this will probably be rendered in a terminal, so split
21 // lines as necessary.
22 Description string
23
24 // Default will be the value returned if no data is entered.
25 Default string
26}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/ui_input_mock.go b/vendor/github.com/hashicorp/terraform/terraform/ui_input_mock.go
new file mode 100644
index 0000000..e3a07ef
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/ui_input_mock.go
@@ -0,0 +1,23 @@
1package terraform
2
3// MockUIInput is an implementation of UIInput that can be used for tests.
4type MockUIInput struct {
5 InputCalled bool
6 InputOpts *InputOpts
7 InputReturnMap map[string]string
8 InputReturnString string
9 InputReturnError error
10 InputFn func(*InputOpts) (string, error)
11}
12
13func (i *MockUIInput) Input(opts *InputOpts) (string, error) {
14 i.InputCalled = true
15 i.InputOpts = opts
16 if i.InputFn != nil {
17 return i.InputFn(opts)
18 }
19 if i.InputReturnMap != nil {
20 return i.InputReturnMap[opts.Id], i.InputReturnError
21 }
22 return i.InputReturnString, i.InputReturnError
23}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/ui_input_prefix.go b/vendor/github.com/hashicorp/terraform/terraform/ui_input_prefix.go
new file mode 100644
index 0000000..2207d1d
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/ui_input_prefix.go
@@ -0,0 +1,19 @@
1package terraform
2
3import (
4 "fmt"
5)
6
7// PrefixUIInput is an implementation of UIInput that prefixes the ID
8// with a string, allowing queries to be namespaced.
9type PrefixUIInput struct {
10 IdPrefix string
11 QueryPrefix string
12 UIInput UIInput
13}
14
15func (i *PrefixUIInput) Input(opts *InputOpts) (string, error) {
16 opts.Id = fmt.Sprintf("%s.%s", i.IdPrefix, opts.Id)
17 opts.Query = fmt.Sprintf("%s%s", i.QueryPrefix, opts.Query)
18 return i.UIInput.Input(opts)
19}
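A usage sketch (assuming the vendored package is importable at this path): the prefixing composes naturally with MockUIInput, and note that Input mutates the passed InputOpts in place.

```go
package main

import (
	"fmt"

	"github.com/hashicorp/terraform/terraform"
)

func main() {
	mock := &terraform.MockUIInput{InputReturnString: "us-west-2"}
	input := &terraform.PrefixUIInput{
		IdPrefix:    "provider.aws",
		QueryPrefix: "aws: ",
		UIInput:     mock,
	}

	val, _ := input.Input(&terraform.InputOpts{Id: "region", Query: "Region?"})
	fmt.Println(val)               // us-west-2
	fmt.Println(mock.InputOpts.Id) // provider.aws.region
}
```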
diff --git a/vendor/github.com/hashicorp/terraform/terraform/ui_output.go b/vendor/github.com/hashicorp/terraform/terraform/ui_output.go
new file mode 100644
index 0000000..84427c6
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/ui_output.go
@@ -0,0 +1,7 @@
1package terraform
2
3// UIOutput is the interface that must be implemented to output
4// data to the end user.
5type UIOutput interface {
6 Output(string)
7}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/ui_output_callback.go b/vendor/github.com/hashicorp/terraform/terraform/ui_output_callback.go
new file mode 100644
index 0000000..135a91c
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/ui_output_callback.go
@@ -0,0 +1,9 @@
1package terraform
2
3type CallbackUIOutput struct {
4 OutputFn func(string)
5}
6
7func (o *CallbackUIOutput) Output(v string) {
8 o.OutputFn(v)
9}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/ui_output_mock.go b/vendor/github.com/hashicorp/terraform/terraform/ui_output_mock.go
new file mode 100644
index 0000000..7852bc4
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/ui_output_mock.go
@@ -0,0 +1,16 @@
1package terraform
2
3// MockUIOutput is an implementation of UIOutput that can be used for tests.
4type MockUIOutput struct {
5 OutputCalled bool
6 OutputMessage string
7 OutputFn func(string)
8}
9
10func (o *MockUIOutput) Output(v string) {
11 o.OutputCalled = true
12 o.OutputMessage = v
13 if o.OutputFn != nil {
14 o.OutputFn(v)
15 }
16}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/ui_output_provisioner.go b/vendor/github.com/hashicorp/terraform/terraform/ui_output_provisioner.go
new file mode 100644
index 0000000..878a031
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/ui_output_provisioner.go
@@ -0,0 +1,15 @@
1package terraform
2
3// ProvisionerUIOutput is an implementation of UIOutput that calls a hook
4// for the output so that the hooks can handle it.
5type ProvisionerUIOutput struct {
6 Info *InstanceInfo
7 Type string
8 Hooks []Hook
9}
10
11func (o *ProvisionerUIOutput) Output(msg string) {
12 for _, h := range o.Hooks {
13 h.ProvisionOutput(o.Info, o.Type, msg)
14 }
15}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/util.go b/vendor/github.com/hashicorp/terraform/terraform/util.go
new file mode 100644
index 0000000..f41f0d7
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/util.go
@@ -0,0 +1,93 @@
1package terraform
2
3import (
4 "sort"
5 "strings"
6)
7
8// Semaphore is a wrapper around a channel to provide
9// utility methods to clarify that we are treating the
10// channel as a semaphore
11type Semaphore chan struct{}
12
13// NewSemaphore creates a semaphore that allows up
14// to a given limit of simultaneous acquisitions
15func NewSemaphore(n int) Semaphore {
16 if n == 0 {
17 panic("semaphore with limit 0")
18 }
19 ch := make(chan struct{}, n)
20 return Semaphore(ch)
21}
22
23// Acquire is used to acquire an available slot.
24// Blocks until available.
25func (s Semaphore) Acquire() {
26 s <- struct{}{}
27}
28
29// TryAcquire is used to do a non-blocking acquire.
30// Returns a bool indicating success
31func (s Semaphore) TryAcquire() bool {
32 select {
33 case s <- struct{}{}:
34 return true
35 default:
36 return false
37 }
38}
39
40// Release is used to return a slot. Acquire must
41// be called as a pre-condition.
42func (s Semaphore) Release() {
43 select {
44 case <-s:
45 default:
46 panic("release without an acquire")
47 }
48}
49
50// resourceProvider returns the provider name for the given type.
51func resourceProvider(t, alias string) string {
52 if alias != "" {
53 return alias
54 }
55
56 idx := strings.IndexRune(t, '_')
57 if idx == -1 {
58 // If no underscores, the resource name is assumed to be
59 // also the provider name, e.g. if the provider exposes
60 // only a single resource of each type.
61 return t
62 }
63
64 return t[:idx]
65}
66
67// strSliceContains checks if a given string is contained in a slice
68// When anybody asks why Go needs generics, here you go.
69func strSliceContains(haystack []string, needle string) bool {
70 for _, s := range haystack {
71 if s == needle {
72 return true
73 }
74 }
75 return false
76}
77
78// deduplicate a slice of strings
79func uniqueStrings(s []string) []string {
80 if len(s) < 2 {
81 return s
82 }
83
84 sort.Strings(s)
85 result := make([]string, 1, len(s))
86 result[0] = s[0]
87 for i := 1; i < len(s); i++ {
88 if s[i] != result[len(result)-1] {
89 result = append(result, s[i])
90 }
91 }
92 return result
93}
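A usage sketch for the semaphore (assuming the vendored package is importable at this path):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/terraform/terraform"
)

func main() {
	sem := terraform.NewSemaphore(2)

	sem.Acquire() // slot 1
	sem.Acquire() // slot 2

	// Both slots are taken, so a non-blocking acquire fails.
	fmt.Println(sem.TryAcquire()) // false

	sem.Release()
	fmt.Println(sem.TryAcquire()) // true
}
```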
diff --git a/vendor/github.com/hashicorp/terraform/terraform/variables.go b/vendor/github.com/hashicorp/terraform/terraform/variables.go
new file mode 100644
index 0000000..300f2ad
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/variables.go
@@ -0,0 +1,166 @@
1package terraform
2
3import (
4 "fmt"
5 "os"
6 "strings"
7
8 "github.com/hashicorp/terraform/config"
9 "github.com/hashicorp/terraform/config/module"
10 "github.com/hashicorp/terraform/helper/hilmapstructure"
11)
12
13// Variables returns the fully loaded set of variables to use with
14// ContextOpts and NewContext, loading any additional variables from
15// the environment or any other sources.
16//
17// The given module tree doesn't need to be loaded.
18func Variables(
19 m *module.Tree,
20 override map[string]interface{}) (map[string]interface{}, error) {
21 result := make(map[string]interface{})
22
23 // Variables are loaded in the following sequence. Each additional step
24 // will override conflicting variable keys from prior steps:
25 //
26 // * Take default values from config
27 // * Take values from TF_VAR_x env vars
28 // * Take values specified in the "override" param which is usually
29 // from -var, -var-file, etc.
30 //
31
32 // First load from the config
33 for _, v := range m.Config().Variables {
34 // If the var has no default, ignore
35 if v.Default == nil {
36 continue
37 }
38
39 // If the type isn't a string, we use it as-is since it is a rich type
40 if v.Type() != config.VariableTypeString {
41 result[v.Name] = v.Default
42 continue
43 }
44
45 // v.Default has already been parsed as HCL but it may be an int type
46 switch typedDefault := v.Default.(type) {
47 case string:
48 if typedDefault == "" {
49 continue
50 }
51 result[v.Name] = typedDefault
52 case int, int64:
53 result[v.Name] = fmt.Sprintf("%d", typedDefault)
54 case float32, float64:
55 result[v.Name] = fmt.Sprintf("%f", typedDefault)
56 case bool:
57 result[v.Name] = fmt.Sprintf("%t", typedDefault)
58 default:
59 panic(fmt.Sprintf(
60 "Unknown default var type: %T\n\n"+
61 "THIS IS A BUG. Please report it.",
62 v.Default))
63 }
64 }
65
66 // Load from env vars
67 for _, v := range os.Environ() {
68 if !strings.HasPrefix(v, VarEnvPrefix) {
69 continue
70 }
71
72 // Strip off the prefix and get the value after the first "="
73 idx := strings.Index(v, "=")
74 k := v[len(VarEnvPrefix):idx]
75 v = v[idx+1:]
76
77 // Override the configuration-default values. Note that *not* finding the variable
78 // in configuration is OK, as we don't want to preclude people from having multiple
79 // sets of TF_VAR_whatever in their environment even if it is a little weird.
80 for _, schema := range m.Config().Variables {
81 if schema.Name != k {
82 continue
83 }
84
85 varType := schema.Type()
86 varVal, err := parseVariableAsHCL(k, v, varType)
87 if err != nil {
88 return nil, err
89 }
90
91 switch varType {
92 case config.VariableTypeMap:
93 if err := varSetMap(result, k, varVal); err != nil {
94 return nil, err
95 }
96 default:
97 result[k] = varVal
98 }
99 }
100 }
101
102 // Load from overrides
103 for k, v := range override {
104 for _, schema := range m.Config().Variables {
105 if schema.Name != k {
106 continue
107 }
108
109 switch schema.Type() {
110 case config.VariableTypeList:
111 result[k] = v
112 case config.VariableTypeMap:
113 if err := varSetMap(result, k, v); err != nil {
114 return nil, err
115 }
116 case config.VariableTypeString:
117 // Convert to a string and set. We don't catch any errors
118 // here because the validation step later should catch
119 // any type errors.
120 var strVal string
121 if err := hilmapstructure.WeakDecode(v, &strVal); err == nil {
122 result[k] = strVal
123 } else {
124 result[k] = v
125 }
126 default:
127 panic(fmt.Sprintf(
128 "Unhandled var type: %T\n\n"+
129 "THIS IS A BUG. Please report it.",
130 schema.Type()))
131 }
132 }
133 }
134
135 return result, nil
136}
137
138// varSetMap sets or merges the map in "v" with the key "k" in the
139// "current" set of variables. This is just a private function to remove
140// duplicate logic in Variables
141func varSetMap(current map[string]interface{}, k string, v interface{}) error {
142 existing, ok := current[k]
143 if !ok {
144 current[k] = v
145 return nil
146 }
147
148 existingMap, ok := existing.(map[string]interface{})
149 if !ok {
150 panic(fmt.Sprintf("%q is not a map, this is a bug in Terraform.", k))
151 }
152
153 switch typedV := v.(type) {
154 case []map[string]interface{}:
155 for newKey, newVal := range typedV[0] {
156 existingMap[newKey] = newVal
157 }
158 case map[string]interface{}:
159 for newKey, newVal := range typedV {
160 existingMap[newKey] = newVal
161 }
162 default:
163 return fmt.Errorf("variable %q should be type map, got %s", k, hclTypeName(v))
164 }
165 return nil
166}
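A toy sketch of the precedence the comment in Variables describes (mergeVars is a hypothetical helper, not the code above): later layers overwrite earlier ones key by key.

```go
package main

import "fmt"

// mergeVars applies layers in order; later layers win on conflicts.
func mergeVars(layers ...map[string]interface{}) map[string]interface{} {
	result := make(map[string]interface{})
	for _, layer := range layers {
		for k, v := range layer {
			result[k] = v
		}
	}
	return result
}

func main() {
	defaults := map[string]interface{}{"region": "us-east-1", "count": "1"}
	env := map[string]interface{}{"region": "us-west-2"} // TF_VAR_region
	override := map[string]interface{}{"count": "3"}     // -var 'count=3'

	fmt.Println(mergeVars(defaults, env, override))
	// map[count:3 region:us-west-2]
}
```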
diff --git a/vendor/github.com/hashicorp/terraform/terraform/version.go b/vendor/github.com/hashicorp/terraform/terraform/version.go
new file mode 100644
index 0000000..93fb429
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/version.go
@@ -0,0 +1,31 @@
1package terraform
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/go-version"
7)
8
9// The main version number that is being run at the moment.
10const Version = "0.9.5"
11
12// A pre-release marker for the version. If this is "" (empty string)
13// then it means that it is a final release. Otherwise, this is a pre-release
14// such as "dev" (in development), "beta", "rc1", etc.
15const VersionPrerelease = ""
16
17// SemVersion is an instance of version.Version. This has the secondary
18// benefit of verifying during tests and init time that our version is a
19// proper semantic version, which should always be the case.
20var SemVersion = version.Must(version.NewVersion(Version))
21
22// VersionHeader is the header name used to send the current terraform version
23// in http requests.
24const VersionHeader = "Terraform-Version"
25
26func VersionString() string {
27 if VersionPrerelease != "" {
28 return fmt.Sprintf("%s-%s", Version, VersionPrerelease)
29 }
30 return Version
31}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/version_required.go b/vendor/github.com/hashicorp/terraform/terraform/version_required.go
new file mode 100644
index 0000000..3cbbf56
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/version_required.go
@@ -0,0 +1,69 @@
1package terraform
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/go-version"
7 "github.com/hashicorp/terraform/config"
8 "github.com/hashicorp/terraform/config/module"
9)
10
11// checkRequiredVersion verifies that any version requirements specified by
12// the configuration are met.
13//
14// This checks the root module as well as any additional version requirements
15// from child modules.
16//
17// This is tested in context_test.go.
18func checkRequiredVersion(m *module.Tree) error {
19 // Check any children
20 for _, c := range m.Children() {
21 if err := checkRequiredVersion(c); err != nil {
22 return err
23 }
24 }
25
26 var tf *config.Terraform
27 if c := m.Config(); c != nil {
28 tf = c.Terraform
29 }
30
31 // If there is no Terraform config or the required version isn't set,
32 // we move on.
33 if tf == nil || tf.RequiredVersion == "" {
34 return nil
35 }
36
37 // Path for errors
38 module := "root"
39 if path := normalizeModulePath(m.Path()); len(path) > 1 {
40 module = modulePrefixStr(path)
41 }
42
43 // Check the version requirement of this module
44 cs, err := version.NewConstraint(tf.RequiredVersion)
45 if err != nil {
46 return fmt.Errorf(
47 "%s: terraform.required_version %q syntax error: %s",
48 module,
49 tf.RequiredVersion, err)
50 }
51
52 if !cs.Check(SemVersion) {
53 return fmt.Errorf(
54 "The currently running version of Terraform doesn't meet the\n"+
55 "version requirements explicitly specified by the configuration.\n"+
56 "Please use the required version or update the configuration.\n"+
57 "Note that version requirements are usually set for a reason, so\n"+
58 "we recommend verifying with whoever set the version requirements\n"+
59 "prior to making any manual changes.\n\n"+
60 " Module: %s\n"+
61 " Required version: %s\n"+
62 " Current version: %s",
63 module,
64 tf.RequiredVersion,
65 SemVersion)
66 }
67
68 return nil
69}
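The constraint check uses hashicorp/go-version; a minimal standalone illustration:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/go-version"
)

func main() {
	running := version.Must(version.NewVersion("0.9.5"))

	// A required_version string like the one read from config.
	cs, err := version.NewConstraint(">= 0.9.0, < 0.10.0")
	if err != nil {
		panic(err)
	}

	fmt.Println(cs.Check(running)) // true
}
```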
diff --git a/vendor/github.com/hashicorp/terraform/terraform/walkoperation_string.go b/vendor/github.com/hashicorp/terraform/terraform/walkoperation_string.go
new file mode 100644
index 0000000..cbd78dd
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/walkoperation_string.go
@@ -0,0 +1,16 @@
1// Code generated by "stringer -type=walkOperation graph_walk_operation.go"; DO NOT EDIT.
2
3package terraform
4
5import "fmt"
6
7const _walkOperation_name = "walkInvalidwalkInputwalkApplywalkPlanwalkPlanDestroywalkRefreshwalkValidatewalkDestroywalkImport"
8
9var _walkOperation_index = [...]uint8{0, 11, 20, 29, 37, 52, 63, 75, 86, 96}
10
11func (i walkOperation) String() string {
12 if i >= walkOperation(len(_walkOperation_index)-1) {
13 return fmt.Sprintf("walkOperation(%d)", i)
14 }
15 return _walkOperation_name[_walkOperation_index[i]:_walkOperation_index[i+1]]
16}
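How the generated tables work, as a standalone sketch: all names live in one string, and the index array gives the byte offset where each name starts.

```go
package main

import "fmt"

func main() {
	const names = "walkInvalidwalkInputwalkApply"
	index := [...]uint8{0, 11, 20, 29}

	for i := 0; i < len(index)-1; i++ {
		fmt.Println(names[index[i]:index[i+1]])
	}
	// Output:
	// walkInvalid
	// walkInput
	// walkApply
}
```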
diff --git a/vendor/github.com/hashicorp/yamux/LICENSE b/vendor/github.com/hashicorp/yamux/LICENSE
new file mode 100644
index 0000000..f0e5c79
--- /dev/null
+++ b/vendor/github.com/hashicorp/yamux/LICENSE
@@ -0,0 +1,362 @@
1Mozilla Public License, version 2.0
2
31. Definitions
4
51.1. "Contributor"
6
7 means each individual or legal entity that creates, contributes to the
8 creation of, or owns Covered Software.
9
101.2. "Contributor Version"
11
12 means the combination of the Contributions of others (if any) used by a
13 Contributor and that particular Contributor's Contribution.
14
151.3. "Contribution"
16
17 means Covered Software of a particular Contributor.
18
191.4. "Covered Software"
20
21 means Source Code Form to which the initial Contributor has attached the
22 notice in Exhibit A, the Executable Form of such Source Code Form, and
23 Modifications of such Source Code Form, in each case including portions
24 thereof.
25
261.5. "Incompatible With Secondary Licenses"
27 means
28
29 a. that the initial Contributor has attached the notice described in
30 Exhibit B to the Covered Software; or
31
32 b. that the Covered Software was made available under the terms of
33 version 1.1 or earlier of the License, but not also under the terms of
34 a Secondary License.
35
361.6. "Executable Form"
37
38 means any form of the work other than Source Code Form.
39
401.7. "Larger Work"
41
42 means a work that combines Covered Software with other material, in a
43 separate file or files, that is not Covered Software.
44
451.8. "License"
46
47 means this document.
48
491.9. "Licensable"
50
51 means having the right to grant, to the maximum extent possible, whether
52 at the time of the initial grant or subsequently, any and all of the
53 rights conveyed by this License.
54
551.10. "Modifications"
56
57 means any of the following:
58
59 a. any file in Source Code Form that results from an addition to,
60 deletion from, or modification of the contents of Covered Software; or
61
62 b. any new file in Source Code Form that contains any Covered Software.
63
641.11. "Patent Claims" of a Contributor
65
66 means any patent claim(s), including without limitation, method,
67 process, and apparatus claims, in any patent Licensable by such
68 Contributor that would be infringed, but for the grant of the License,
69 by the making, using, selling, offering for sale, having made, import,
70 or transfer of either its Contributions or its Contributor Version.
71
721.12. "Secondary License"
73
74 means either the GNU General Public License, Version 2.0, the GNU Lesser
75 General Public License, Version 2.1, the GNU Affero General Public
76 License, Version 3.0, or any later versions of those licenses.
77
781.13. "Source Code Form"
79
80 means the form of the work preferred for making modifications.
81
821.14. "You" (or "Your")
83
84 means an individual or a legal entity exercising rights under this
85 License. For legal entities, "You" includes any entity that controls, is
86 controlled by, or is under common control with You. For purposes of this
87 definition, "control" means (a) the power, direct or indirect, to cause
88 the direction or management of such entity, whether by contract or
89 otherwise, or (b) ownership of more than fifty percent (50%) of the
90 outstanding shares or beneficial ownership of such entity.
91
92
932. License Grants and Conditions
94
952.1. Grants
96
97 Each Contributor hereby grants You a world-wide, royalty-free,
98 non-exclusive license:
99
100 a. under intellectual property rights (other than patent or trademark)
101 Licensable by such Contributor to use, reproduce, make available,
102 modify, display, perform, distribute, and otherwise exploit its
103 Contributions, either on an unmodified basis, with Modifications, or
104 as part of a Larger Work; and
105
106 b. under Patent Claims of such Contributor to make, use, sell, offer for
107 sale, have made, import, and otherwise transfer either its
108 Contributions or its Contributor Version.
109
1102.2. Effective Date
111
112 The licenses granted in Section 2.1 with respect to any Contribution
113 become effective for each Contribution on the date the Contributor first
114 distributes such Contribution.
115
1162.3. Limitations on Grant Scope
117
118 The licenses granted in this Section 2 are the only rights granted under
119 this License. No additional rights or licenses will be implied from the
120 distribution or licensing of Covered Software under this License.
121 Notwithstanding Section 2.1(b) above, no patent license is granted by a
122 Contributor:
123
124 a. for any code that a Contributor has removed from Covered Software; or
125
126 b. for infringements caused by: (i) Your and any other third party's
127 modifications of Covered Software, or (ii) the combination of its
128 Contributions with other software (except as part of its Contributor
129 Version); or
130
131 c. under Patent Claims infringed by Covered Software in the absence of
132 its Contributions.
133
134 This License does not grant any rights in the trademarks, service marks,
135 or logos of any Contributor (except as may be necessary to comply with
136 the notice requirements in Section 3.4).
137
1382.4. Subsequent Licenses
139
140 No Contributor makes additional grants as a result of Your choice to
141 distribute the Covered Software under a subsequent version of this
142 License (see Section 10.2) or under the terms of a Secondary License (if
143 permitted under the terms of Section 3.3).
144
1452.5. Representation
146
147 Each Contributor represents that the Contributor believes its
148 Contributions are its original creation(s) or it has sufficient rights to
149 grant the rights to its Contributions conveyed by this License.
150
1512.6. Fair Use
152
153 This License is not intended to limit any rights You have under
154 applicable copyright doctrines of fair use, fair dealing, or other
155 equivalents.
156
1572.7. Conditions
158
159 Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
160 Section 2.1.
161
162
1633. Responsibilities
164
1653.1. Distribution of Source Form
166
167 All distribution of Covered Software in Source Code Form, including any
168 Modifications that You create or to which You contribute, must be under
169 the terms of this License. You must inform recipients that the Source
170 Code Form of the Covered Software is governed by the terms of this
171 License, and how they can obtain a copy of this License. You may not
172 attempt to alter or restrict the recipients' rights in the Source Code
173 Form.
174
1753.2. Distribution of Executable Form
176
177 If You distribute Covered Software in Executable Form then:
178
179 a. such Covered Software must also be made available in Source Code Form,
180 as described in Section 3.1, and You must inform recipients of the
181 Executable Form how they can obtain a copy of such Source Code Form by
182 reasonable means in a timely manner, at a charge no more than the cost
183 of distribution to the recipient; and
184
185 b. You may distribute such Executable Form under the terms of this
186 License, or sublicense it under different terms, provided that the
187 license for the Executable Form does not attempt to limit or alter the
188 recipients' rights in the Source Code Form under this License.
189
1903.3. Distribution of a Larger Work
191
192 You may create and distribute a Larger Work under terms of Your choice,
193 provided that You also comply with the requirements of this License for
194 the Covered Software. If the Larger Work is a combination of Covered
195 Software with a work governed by one or more Secondary Licenses, and the
196 Covered Software is not Incompatible With Secondary Licenses, this
197 License permits You to additionally distribute such Covered Software
198 under the terms of such Secondary License(s), so that the recipient of
199 the Larger Work may, at their option, further distribute the Covered
200 Software under the terms of either this License or such Secondary
201 License(s).
202
2033.4. Notices
204
205 You may not remove or alter the substance of any license notices
206 (including copyright notices, patent notices, disclaimers of warranty, or
207 limitations of liability) contained within the Source Code Form of the
208 Covered Software, except that You may alter any license notices to the
209 extent required to remedy known factual inaccuracies.
210
2113.5. Application of Additional Terms
212
213 You may choose to offer, and to charge a fee for, warranty, support,
214 indemnity or liability obligations to one or more recipients of Covered
215 Software. However, You may do so only on Your own behalf, and not on
216 behalf of any Contributor. You must make it absolutely clear that any
217 such warranty, support, indemnity, or liability obligation is offered by
218 You alone, and You hereby agree to indemnify every Contributor for any
219 liability incurred by such Contributor as a result of warranty, support,
220 indemnity or liability terms You offer. You may include additional
221 disclaimers of warranty and limitations of liability specific to any
222 jurisdiction.
223
2244. Inability to Comply Due to Statute or Regulation
225
226 If it is impossible for You to comply with any of the terms of this License
227 with respect to some or all of the Covered Software due to statute,
228 judicial order, or regulation then You must: (a) comply with the terms of
229 this License to the maximum extent possible; and (b) describe the
230 limitations and the code they affect. Such description must be placed in a
231 text file included with all distributions of the Covered Software under
232 this License. Except to the extent prohibited by statute or regulation,
233 such description must be sufficiently detailed for a recipient of ordinary
234 skill to be able to understand it.
235
2365. Termination
237
2385.1. The rights granted under this License will terminate automatically if You
239 fail to comply with any of its terms. However, if You become compliant,
240 then the rights granted under this License from a particular Contributor
241 are reinstated (a) provisionally, unless and until such Contributor
242 explicitly and finally terminates Your grants, and (b) on an ongoing
243 basis, if such Contributor fails to notify You of the non-compliance by
244 some reasonable means prior to 60 days after You have come back into
245 compliance. Moreover, Your grants from a particular Contributor are
246 reinstated on an ongoing basis if such Contributor notifies You of the
247 non-compliance by some reasonable means, this is the first time You have
248 received notice of non-compliance with this License from such
249 Contributor, and You become compliant prior to 30 days after Your receipt
250 of the notice.
251
2525.2. If You initiate litigation against any entity by asserting a patent
253 infringement claim (excluding declaratory judgment actions,
254 counter-claims, and cross-claims) alleging that a Contributor Version
255 directly or indirectly infringes any patent, then the rights granted to
256 You by any and all Contributors for the Covered Software under Section
257 2.1 of this License shall terminate.
258
2595.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
260 license agreements (excluding distributors and resellers) which have been
261 validly granted by You or Your distributors under this License prior to
262 termination shall survive termination.
263
2646. Disclaimer of Warranty
265
266 Covered Software is provided under this License on an "as is" basis,
267 without warranty of any kind, either expressed, implied, or statutory,
268 including, without limitation, warranties that the Covered Software is free
269 of defects, merchantable, fit for a particular purpose or non-infringing.
270 The entire risk as to the quality and performance of the Covered Software
271 is with You. Should any Covered Software prove defective in any respect,
272 You (not any Contributor) assume the cost of any necessary servicing,
273 repair, or correction. This disclaimer of warranty constitutes an essential
274 part of this License. No use of any Covered Software is authorized under
275 this License except under this disclaimer.
276
2777. Limitation of Liability
278
279 Under no circumstances and under no legal theory, whether tort (including
280 negligence), contract, or otherwise, shall any Contributor, or anyone who
281 distributes Covered Software as permitted above, be liable to You for any
282 direct, indirect, special, incidental, or consequential damages of any
283 character including, without limitation, damages for lost profits, loss of
284 goodwill, work stoppage, computer failure or malfunction, or any and all
285 other commercial damages or losses, even if such party shall have been
286 informed of the possibility of such damages. This limitation of liability
287 shall not apply to liability for death or personal injury resulting from
288 such party's negligence to the extent applicable law prohibits such
289 limitation. Some jurisdictions do not allow the exclusion or limitation of
290 incidental or consequential damages, so this exclusion and limitation may
291 not apply to You.
292
2938. Litigation
294
295 Any litigation relating to this License may be brought only in the courts
296 of a jurisdiction where the defendant maintains its principal place of
297 business and such litigation shall be governed by laws of that
298 jurisdiction, without reference to its conflict-of-law provisions. Nothing
299 in this Section shall prevent a party's ability to bring cross-claims or
300 counter-claims.
301
3029. Miscellaneous
303
304 This License represents the complete agreement concerning the subject
305 matter hereof. If any provision of this License is held to be
306 unenforceable, such provision shall be reformed only to the extent
307 necessary to make it enforceable. Any law or regulation which provides that
308 the language of a contract shall be construed against the drafter shall not
309 be used to construe this License against a Contributor.
310
311
31210. Versions of the License
313
31410.1. New Versions
315
316 Mozilla Foundation is the license steward. Except as provided in Section
317 10.3, no one other than the license steward has the right to modify or
318 publish new versions of this License. Each version will be given a
319 distinguishing version number.
320
32110.2. Effect of New Versions
322
323 You may distribute the Covered Software under the terms of the version
324 of the License under which You originally received the Covered Software,
325 or under the terms of any subsequent version published by the license
326 steward.
327
32810.3. Modified Versions
329
330 If you create software not governed by this License, and you want to
331 create a new license for such software, you may create and use a
332 modified version of this License if you rename the license and remove
333 any references to the name of the license steward (except to note that
334 such modified license differs from this License).
335
33610.4. Distributing Source Code Form that is Incompatible With Secondary
337 Licenses If You choose to distribute Source Code Form that is
338 Incompatible With Secondary Licenses under the terms of this version of
339 the License, the notice described in Exhibit B of this License must be
340 attached.
341
342Exhibit A - Source Code Form License Notice
343
344 This Source Code Form is subject to the
345 terms of the Mozilla Public License, v.
346 2.0. If a copy of the MPL was not
347 distributed with this file, You can
348 obtain one at
349 http://mozilla.org/MPL/2.0/.
350
351If it is not possible or desirable to put the notice in a particular file,
352then You may include the notice in a location (such as a LICENSE file in a
353relevant directory) where a recipient would be likely to look for such a
354notice.
355
356You may add additional accurate notices of copyright ownership.
357
358Exhibit B - "Incompatible With Secondary Licenses" Notice
359
360 This Source Code Form is "Incompatible
361 With Secondary Licenses", as defined by
362 the Mozilla Public License, v. 2.0. \ No newline at end of file
diff --git a/vendor/github.com/hashicorp/yamux/README.md b/vendor/github.com/hashicorp/yamux/README.md
new file mode 100644
index 0000000..d4db7fc
--- /dev/null
+++ b/vendor/github.com/hashicorp/yamux/README.md
@@ -0,0 +1,86 @@
1# Yamux
2
3Yamux (Yet another Multiplexer) is a multiplexing library for Golang.
4It relies on an underlying connection to provide reliability
5and ordering, such as TCP or Unix domain sockets, and provides
6stream-oriented multiplexing. It is inspired by SPDY but is not
7interoperable with it.
8
9Yamux features include:
10
11* Bi-directional streams
12 * Streams can be opened by either client or server
13 * Useful for NAT traversal
14 * Server-side push support
15* Flow control
16 * Avoid starvation
17 * Back-pressure to prevent overwhelming a receiver
18* Keep Alives
19 * Enables persistent connections over a load balancer
20* Efficient
21 * Enables thousands of logical streams with low overhead
22
23## Documentation
24
25For complete documentation, see the associated [Godoc](http://godoc.org/github.com/hashicorp/yamux).
26
27## Specification
28
29The full specification for Yamux is provided in the `spec.md` file.
30It can be used as a guide to implementors of interoperable libraries.
31
32## Usage
33
34Using Yamux is remarkably simple:
35
36```go
37
38func client() {
39 // Get a TCP connection
40 conn, err := net.Dial(...)
41 if err != nil {
42 panic(err)
43 }
44
45 // Setup client side of yamux
46 session, err := yamux.Client(conn, nil)
47 if err != nil {
48 panic(err)
49 }
50
51 // Open a new stream
52 stream, err := session.Open()
53 if err != nil {
54 panic(err)
55 }
56
57 // Stream implements net.Conn
58 stream.Write([]byte("ping"))
59}
60
61func server() {
62 // Accept a TCP connection
63 conn, err := listener.Accept()
64 if err != nil {
65 panic(err)
66 }
67
68 // Setup server side of yamux
69 session, err := yamux.Server(conn, nil)
70 if err != nil {
71 panic(err)
72 }
73
74 // Accept a stream
75 stream, err := session.Accept()
76 if err != nil {
77 panic(err)
78 }
79
80 // Listen for a message
81 buf := make([]byte, 4)
82 stream.Read(buf)
83}
84
85```
86
diff --git a/vendor/github.com/hashicorp/yamux/addr.go b/vendor/github.com/hashicorp/yamux/addr.go
new file mode 100644
index 0000000..be6ebca
--- /dev/null
+++ b/vendor/github.com/hashicorp/yamux/addr.go
@@ -0,0 +1,60 @@
1package yamux
2
3import (
4 "fmt"
5 "net"
6)
7
8// hasAddr is used to get the address from the underlying connection
9type hasAddr interface {
10 LocalAddr() net.Addr
11 RemoteAddr() net.Addr
12}
13
14// yamuxAddr is used when we cannot get the underlying address
15type yamuxAddr struct {
16 Addr string
17}
18
19func (*yamuxAddr) Network() string {
20 return "yamux"
21}
22
23func (y *yamuxAddr) String() string {
24 return fmt.Sprintf("yamux:%s", y.Addr)
25}
26
27// Addr is used to get the address of the listener.
28func (s *Session) Addr() net.Addr {
29 return s.LocalAddr()
30}
31
32// LocalAddr is used to get the local address of the
33// underlying connection.
34func (s *Session) LocalAddr() net.Addr {
35 addr, ok := s.conn.(hasAddr)
36 if !ok {
37 return &yamuxAddr{"local"}
38 }
39 return addr.LocalAddr()
40}
41
42// RemoteAddr is used to get the address of the remote end
43// of the underlying connection
44func (s *Session) RemoteAddr() net.Addr {
45 addr, ok := s.conn.(hasAddr)
46 if !ok {
47 return &yamuxAddr{"remote"}
48 }
49 return addr.RemoteAddr()
50}
51
52// LocalAddr returns the local address
53func (s *Stream) LocalAddr() net.Addr {
54 return s.session.LocalAddr()
55}
56
57// RemoteAddr returns the remote address
58func (s *Stream) RemoteAddr() net.Addr {
59 return s.session.RemoteAddr()
60}
diff --git a/vendor/github.com/hashicorp/yamux/const.go b/vendor/github.com/hashicorp/yamux/const.go
new file mode 100644
index 0000000..4f52938
--- /dev/null
+++ b/vendor/github.com/hashicorp/yamux/const.go
@@ -0,0 +1,157 @@
1package yamux
2
3import (
4 "encoding/binary"
5 "fmt"
6)
7
8var (
9 // ErrInvalidVersion means we received a frame with an
10 // invalid version
11 ErrInvalidVersion = fmt.Errorf("invalid protocol version")
12
13 // ErrInvalidMsgType means we received a frame with an
14 // invalid message type
15 ErrInvalidMsgType = fmt.Errorf("invalid msg type")
16
17 // ErrSessionShutdown is used if there is a shutdown during
18 // an operation
19 ErrSessionShutdown = fmt.Errorf("session shutdown")
20
21 // ErrStreamsExhausted is returned if we have no more
22 // stream ids to issue
23 ErrStreamsExhausted = fmt.Errorf("streams exhausted")
24
25 // ErrDuplicateStream is used if a duplicate stream is
26 // opened inbound
27 ErrDuplicateStream = fmt.Errorf("duplicate stream initiated")
28
29 // ErrRecvWindowExceeded indicates the receive window was exceeded
30 ErrRecvWindowExceeded = fmt.Errorf("recv window exceeded")
31
32 // ErrTimeout is used when we reach an IO deadline
33 ErrTimeout = fmt.Errorf("i/o deadline reached")
34
35 // ErrStreamClosed is returned when using a closed stream
36 ErrStreamClosed = fmt.Errorf("stream closed")
37
38 // ErrUnexpectedFlag is set when we get an unexpected flag
39 ErrUnexpectedFlag = fmt.Errorf("unexpected flag")
40
41 // ErrRemoteGoAway is used when we get a go away from the other side
42 ErrRemoteGoAway = fmt.Errorf("remote end is not accepting connections")
43
44 // ErrConnectionReset is sent if a stream is reset. This can happen
45 // if the backlog is exceeded, or if there was a remote GoAway.
46 ErrConnectionReset = fmt.Errorf("connection reset")
47
48 // ErrConnectionWriteTimeout indicates that we hit the "safety valve"
49 // timeout writing to the underlying stream connection.
50 ErrConnectionWriteTimeout = fmt.Errorf("connection write timeout")
51
52 // ErrKeepAliveTimeout is sent if a missed keepalive caused the stream close
53 ErrKeepAliveTimeout = fmt.Errorf("keepalive timeout")
54)
55
56const (
57 // protoVersion is the only version we support
58 protoVersion uint8 = 0
59)
60
61const (
62 // Data is used for data frames. They are followed
63 // by length bytes worth of payload.
64 typeData uint8 = iota
65
66 // WindowUpdate is used to change the window of
67 // a given stream. The length indicates the delta
68 // update to the window.
69 typeWindowUpdate
70
71 // Ping is sent as a keep-alive or to measure
72 // the RTT. The StreamID and Length value are echoed
73 // back in the response.
74 typePing
75
76 // GoAway is sent to terminate a session. The StreamID
77 // should be 0 and the length is an error code.
78 typeGoAway
79)
80
81const (
82 // SYN is sent to signal a new stream. May
83 // be sent with a data payload
84 flagSYN uint16 = 1 << iota
85
86 // ACK is sent to acknowledge a new stream. May
87 // be sent with a data payload
88 flagACK
89
90 // FIN is sent to half-close the given stream.
91 // May be sent with a data payload.
92 flagFIN
93
94 // RST is used to hard close a given stream.
95 flagRST
96)
97
98const (
99 // initialStreamWindow is the initial stream window size
100 initialStreamWindow uint32 = 256 * 1024
101)
102
103const (
104 // goAwayNormal is sent on a normal termination
105 goAwayNormal uint32 = iota
106
107 // goAwayProtoErr sent on a protocol error
108 goAwayProtoErr
109
110 // goAwayInternalErr sent on an internal error
111 goAwayInternalErr
112)
113
114const (
115 sizeOfVersion = 1
116 sizeOfType = 1
117 sizeOfFlags = 2
118 sizeOfStreamID = 4
119 sizeOfLength = 4
120 headerSize = sizeOfVersion + sizeOfType + sizeOfFlags +
121 sizeOfStreamID + sizeOfLength
122)
123
124type header []byte
125
126func (h header) Version() uint8 {
127 return h[0]
128}
129
130func (h header) MsgType() uint8 {
131 return h[1]
132}
133
134func (h header) Flags() uint16 {
135 return binary.BigEndian.Uint16(h[2:4])
136}
137
138func (h header) StreamID() uint32 {
139 return binary.BigEndian.Uint32(h[4:8])
140}
141
142func (h header) Length() uint32 {
143 return binary.BigEndian.Uint32(h[8:12])
144}
145
146func (h header) String() string {
147 return fmt.Sprintf("Vsn:%d Type:%d Flags:%d StreamID:%d Length:%d",
148 h.Version(), h.MsgType(), h.Flags(), h.StreamID(), h.Length())
149}
150
151func (h header) encode(msgType uint8, flags uint16, streamID uint32, length uint32) {
152 h[0] = protoVersion
153 h[1] = msgType
154 binary.BigEndian.PutUint16(h[2:4], flags)
155 binary.BigEndian.PutUint32(h[4:8], streamID)
156 binary.BigEndian.PutUint32(h[8:12], length)
157}
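A standalone sketch of the 12-byte wire header defined above: version (1 byte), type (1), flags (2), stream ID (4), length (4), with the multi-byte fields big-endian.

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	hdr := make([]byte, 12)
	hdr[0] = 0                                // protoVersion
	hdr[1] = 2                                // typePing
	binary.BigEndian.PutUint16(hdr[2:4], 1)   // flagSYN
	binary.BigEndian.PutUint32(hdr[4:8], 0)   // stream ID 0
	binary.BigEndian.PutUint32(hdr[8:12], 42) // opaque ping value in Length

	fmt.Printf("% x\n", hdr)
	// 00 02 00 01 00 00 00 00 00 00 00 2a
	fmt.Println(binary.BigEndian.Uint32(hdr[8:12])) // 42
}
```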
diff --git a/vendor/github.com/hashicorp/yamux/mux.go b/vendor/github.com/hashicorp/yamux/mux.go
new file mode 100644
index 0000000..7abc7c7
--- /dev/null
+++ b/vendor/github.com/hashicorp/yamux/mux.go
@@ -0,0 +1,87 @@
1package yamux
2
3import (
4 "fmt"
5 "io"
6 "os"
7 "time"
8)
9
10// Config is used to tune the Yamux session
11type Config struct {
12 // AcceptBacklog is used to limit how many streams may be
13 // waiting for an accept.
14 AcceptBacklog int
15
16 // EnableKeepAlive is used to send periodic keep-alive
17 // messages using pings.
18 EnableKeepAlive bool
19
20 // KeepAliveInterval is how often to perform the keep alive
21 KeepAliveInterval time.Duration
22
23 // ConnectionWriteTimeout is meant to be a "safety valve" timeout after
24 // which we will suspect a problem with the underlying connection and
25 // close it. This is only applied to writes, where there's generally
26 // an expectation that things will move along quickly.
27 ConnectionWriteTimeout time.Duration
28
29 // MaxStreamWindowSize is used to control the maximum
30 // window size that we allow for a stream.
31 MaxStreamWindowSize uint32
32
33 // LogOutput is used to control the log destination
34 LogOutput io.Writer
35}
36
37// DefaultConfig is used to return a default configuration
38func DefaultConfig() *Config {
39 return &Config{
40 AcceptBacklog: 256,
41 EnableKeepAlive: true,
42 KeepAliveInterval: 30 * time.Second,
43 ConnectionWriteTimeout: 10 * time.Second,
44 MaxStreamWindowSize: initialStreamWindow,
45 LogOutput: os.Stderr,
46 }
47}
48
49// VerifyConfig is used to verify the sanity of configuration
50func VerifyConfig(config *Config) error {
51 if config.AcceptBacklog <= 0 {
52 return fmt.Errorf("backlog must be positive")
53 }
54 if config.KeepAliveInterval == 0 {
55 return fmt.Errorf("keep-alive interval must be positive")
56 }
57 if config.MaxStreamWindowSize < initialStreamWindow {
58		return fmt.Errorf("MaxStreamWindowSize must be at least %d", initialStreamWindow)
59 }
60 return nil
61}
62
63// Server is used to initialize a new server-side connection.
64// There must be at most one server-side connection. If a nil config is
65// provided, the DefaultConfig will be used.
66func Server(conn io.ReadWriteCloser, config *Config) (*Session, error) {
67 if config == nil {
68 config = DefaultConfig()
69 }
70 if err := VerifyConfig(config); err != nil {
71 return nil, err
72 }
73 return newSession(config, conn, false), nil
74}
75
76// Client is used to initialize a new client-side connection.
77// There must be at most one client-side connection. If a nil config is provided, the DefaultConfig will be used.
78func Client(conn io.ReadWriteCloser, config *Config) (*Session, error) {
79 if config == nil {
80 config = DefaultConfig()
81 }
82
83 if err := VerifyConfig(config); err != nil {
84 return nil, err
85 }
86 return newSession(config, conn, true), nil
87}
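
A minimal end-to-end sketch of the exported API above, with net.Pipe standing in for any reliable, ordered io.ReadWriteCloser; the server echoes a single stream:

```go
package main

import (
	"fmt"
	"io"
	"net"

	"github.com/hashicorp/yamux"
)

func main() {
	c, s := net.Pipe() // any reliable, ordered connection works here

	go func() {
		// Server side: nil config means DefaultConfig() is used.
		session, err := yamux.Server(s, nil)
		if err != nil {
			panic(err)
		}
		stream, err := session.Accept()
		if err != nil {
			panic(err)
		}
		io.Copy(stream, stream) // echo the stream back
	}()

	// Client side: open one stream and exchange a message.
	session, err := yamux.Client(c, nil)
	if err != nil {
		panic(err)
	}
	stream, err := session.Open()
	if err != nil {
		panic(err)
	}
	stream.Write([]byte("ping"))
	buf := make([]byte, 4)
	io.ReadFull(stream, buf)
	fmt.Println(string(buf)) // ping
}
```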
diff --git a/vendor/github.com/hashicorp/yamux/session.go b/vendor/github.com/hashicorp/yamux/session.go
new file mode 100644
index 0000000..e179818
--- /dev/null
+++ b/vendor/github.com/hashicorp/yamux/session.go
@@ -0,0 +1,623 @@
1package yamux
2
3import (
4 "bufio"
5 "fmt"
6 "io"
7 "io/ioutil"
8 "log"
9 "math"
10 "net"
11 "strings"
12 "sync"
13 "sync/atomic"
14 "time"
15)
16
17// Session is used to wrap a reliable ordered connection and to
18// multiplex it into multiple streams.
19type Session struct {
20 // remoteGoAway indicates the remote side does
21	// not want further connections. Must be first for alignment.
22 remoteGoAway int32
23
24 // localGoAway indicates that we should stop
25	// accepting further connections. Must be first for alignment.
26 localGoAway int32
27
28	// nextStreamID is the next stream ID we should
29	// use. This depends on whether we are a client or server.
30 nextStreamID uint32
31
32 // config holds our configuration
33 config *Config
34
35 // logger is used for our logs
36 logger *log.Logger
37
38 // conn is the underlying connection
39 conn io.ReadWriteCloser
40
41 // bufRead is a buffered reader
42 bufRead *bufio.Reader
43
44 // pings is used to track inflight pings
45 pings map[uint32]chan struct{}
46 pingID uint32
47 pingLock sync.Mutex
48
49 // streams maps a stream id to a stream, and inflight has an entry
50 // for any outgoing stream that has not yet been established. Both are
51 // protected by streamLock.
52 streams map[uint32]*Stream
53 inflight map[uint32]struct{}
54 streamLock sync.Mutex
55
56 // synCh acts like a semaphore. It is sized to the AcceptBacklog which
57 // is assumed to be symmetric between the client and server. This allows
58	// the client to avoid exceeding the backlog and instead block on open.
59 synCh chan struct{}
60
61 // acceptCh is used to pass ready streams to the client
62 acceptCh chan *Stream
63
64 // sendCh is used to mark a stream as ready to send,
65 // or to send a header out directly.
66 sendCh chan sendReady
67
68 // recvDoneCh is closed when recv() exits to avoid a race
69 // between stream registration and stream shutdown
70 recvDoneCh chan struct{}
71
72 // shutdown is used to safely close a session
73 shutdown bool
74 shutdownErr error
75 shutdownCh chan struct{}
76 shutdownLock sync.Mutex
77}
78
79// sendReady is used to either mark a stream as ready
80// or to directly send a header
81type sendReady struct {
82 Hdr []byte
83 Body io.Reader
84 Err chan error
85}
86
87// newSession is used to construct a new session
88func newSession(config *Config, conn io.ReadWriteCloser, client bool) *Session {
89 s := &Session{
90 config: config,
91 logger: log.New(config.LogOutput, "", log.LstdFlags),
92 conn: conn,
93 bufRead: bufio.NewReader(conn),
94 pings: make(map[uint32]chan struct{}),
95 streams: make(map[uint32]*Stream),
96 inflight: make(map[uint32]struct{}),
97 synCh: make(chan struct{}, config.AcceptBacklog),
98 acceptCh: make(chan *Stream, config.AcceptBacklog),
99 sendCh: make(chan sendReady, 64),
100 recvDoneCh: make(chan struct{}),
101 shutdownCh: make(chan struct{}),
102 }
103 if client {
104 s.nextStreamID = 1
105 } else {
106 s.nextStreamID = 2
107 }
108 go s.recv()
109 go s.send()
110 if config.EnableKeepAlive {
111 go s.keepalive()
112 }
113 return s
114}
115
116// IsClosed does a safe check to see if we have shut down
117func (s *Session) IsClosed() bool {
118 select {
119 case <-s.shutdownCh:
120 return true
121 default:
122 return false
123 }
124}
125
126// NumStreams returns the number of currently open streams
127func (s *Session) NumStreams() int {
128 s.streamLock.Lock()
129 num := len(s.streams)
130 s.streamLock.Unlock()
131 return num
132}
133
134// Open is used to create a new stream as a net.Conn
135func (s *Session) Open() (net.Conn, error) {
136 conn, err := s.OpenStream()
137 if err != nil {
138 return nil, err
139 }
140 return conn, nil
141}
142
143// OpenStream is used to create a new stream
144func (s *Session) OpenStream() (*Stream, error) {
145 if s.IsClosed() {
146 return nil, ErrSessionShutdown
147 }
148 if atomic.LoadInt32(&s.remoteGoAway) == 1 {
149 return nil, ErrRemoteGoAway
150 }
151
152 // Block if we have too many inflight SYNs
153 select {
154 case s.synCh <- struct{}{}:
155 case <-s.shutdownCh:
156 return nil, ErrSessionShutdown
157 }
158
159GET_ID:
160 // Get an ID, and check for stream exhaustion
161 id := atomic.LoadUint32(&s.nextStreamID)
162 if id >= math.MaxUint32-1 {
163 return nil, ErrStreamsExhausted
164 }
165 if !atomic.CompareAndSwapUint32(&s.nextStreamID, id, id+2) {
166 goto GET_ID
167 }
168
169 // Register the stream
170 stream := newStream(s, id, streamInit)
171 s.streamLock.Lock()
172 s.streams[id] = stream
173 s.inflight[id] = struct{}{}
174 s.streamLock.Unlock()
175
176	// Send the window update to create the stream
177 if err := stream.sendWindowUpdate(); err != nil {
178 select {
179 case <-s.synCh:
180 default:
181 s.logger.Printf("[ERR] yamux: aborted stream open without inflight syn semaphore")
182 }
183 return nil, err
184 }
185 return stream, nil
186}
187
188// Accept is used to block until the next available stream
189// is ready to be accepted.
190func (s *Session) Accept() (net.Conn, error) {
191 conn, err := s.AcceptStream()
192 if err != nil {
193 return nil, err
194 }
195 return conn, err
196}
197
198// AcceptStream is used to block until the next available stream
199// is ready to be accepted.
200func (s *Session) AcceptStream() (*Stream, error) {
201 select {
202 case stream := <-s.acceptCh:
203 if err := stream.sendWindowUpdate(); err != nil {
204 return nil, err
205 }
206 return stream, nil
207 case <-s.shutdownCh:
208 return nil, s.shutdownErr
209 }
210}
211
212// Close is used to close the session and all streams.
213// Attempts to send a GoAway before closing the connection.
214func (s *Session) Close() error {
215 s.shutdownLock.Lock()
216 defer s.shutdownLock.Unlock()
217
218 if s.shutdown {
219 return nil
220 }
221 s.shutdown = true
222 if s.shutdownErr == nil {
223 s.shutdownErr = ErrSessionShutdown
224 }
225 close(s.shutdownCh)
226 s.conn.Close()
227 <-s.recvDoneCh
228
229 s.streamLock.Lock()
230 defer s.streamLock.Unlock()
231 for _, stream := range s.streams {
232 stream.forceClose()
233 }
234 return nil
235}
236
237// exitErr is used to handle an error that is causing the
238// session to terminate.
239func (s *Session) exitErr(err error) {
240 s.shutdownLock.Lock()
241 if s.shutdownErr == nil {
242 s.shutdownErr = err
243 }
244 s.shutdownLock.Unlock()
245 s.Close()
246}
247
248// GoAway can be used to prevent accepting further
249// connections. It does not close the underlying conn.
250func (s *Session) GoAway() error {
251 return s.waitForSend(s.goAway(goAwayNormal), nil)
252}
253
254// goAway is used to send a goAway message
255func (s *Session) goAway(reason uint32) header {
256 atomic.SwapInt32(&s.localGoAway, 1)
257 hdr := header(make([]byte, headerSize))
258 hdr.encode(typeGoAway, 0, 0, reason)
259 return hdr
260}
261
262// Ping is used to measure the round-trip time (RTT)
263func (s *Session) Ping() (time.Duration, error) {
264 // Get a channel for the ping
265 ch := make(chan struct{})
266
267 // Get a new ping id, mark as pending
268 s.pingLock.Lock()
269 id := s.pingID
270 s.pingID++
271 s.pings[id] = ch
272 s.pingLock.Unlock()
273
274 // Send the ping request
275 hdr := header(make([]byte, headerSize))
276 hdr.encode(typePing, flagSYN, 0, id)
277 if err := s.waitForSend(hdr, nil); err != nil {
278 return 0, err
279 }
280
281 // Wait for a response
282 start := time.Now()
283 select {
284 case <-ch:
285 case <-time.After(s.config.ConnectionWriteTimeout):
286 s.pingLock.Lock()
287 delete(s.pings, id) // Ignore it if a response comes later.
288 s.pingLock.Unlock()
289 return 0, ErrTimeout
290 case <-s.shutdownCh:
291 return 0, ErrSessionShutdown
292 }
293
294 // Compute the RTT
295	return time.Since(start), nil
296}
297
298// keepalive is a long-running goroutine that periodically does
299// a ping to keep the connection alive.
300func (s *Session) keepalive() {
301 for {
302 select {
303 case <-time.After(s.config.KeepAliveInterval):
304 _, err := s.Ping()
305 if err != nil {
306 s.logger.Printf("[ERR] yamux: keepalive failed: %v", err)
307 s.exitErr(ErrKeepAliveTimeout)
308 return
309 }
310 case <-s.shutdownCh:
311 return
312 }
313 }
314}
315
316// waitForSend waits to send a header, checking for a potential shutdown
317func (s *Session) waitForSend(hdr header, body io.Reader) error {
318 errCh := make(chan error, 1)
319 return s.waitForSendErr(hdr, body, errCh)
320}
321
322// waitForSendErr waits to send a header with optional data, checking for a
323// potential shutdown. Since there's the expectation that sends can happen
324// in a timely manner, we enforce the connection write timeout here.
325func (s *Session) waitForSendErr(hdr header, body io.Reader, errCh chan error) error {
326 timer := time.NewTimer(s.config.ConnectionWriteTimeout)
327 defer timer.Stop()
328
329 ready := sendReady{Hdr: hdr, Body: body, Err: errCh}
330 select {
331 case s.sendCh <- ready:
332 case <-s.shutdownCh:
333 return ErrSessionShutdown
334 case <-timer.C:
335 return ErrConnectionWriteTimeout
336 }
337
338 select {
339 case err := <-errCh:
340 return err
341 case <-s.shutdownCh:
342 return ErrSessionShutdown
343 case <-timer.C:
344 return ErrConnectionWriteTimeout
345 }
346}
347
348// sendNoWait does a send without waiting. Since there's the expectation that
349// the send happens right here, we enforce the connection write timeout if we
350// can't queue the header to be sent.
351func (s *Session) sendNoWait(hdr header) error {
352 timer := time.NewTimer(s.config.ConnectionWriteTimeout)
353 defer timer.Stop()
354
355 select {
356 case s.sendCh <- sendReady{Hdr: hdr}:
357 return nil
358 case <-s.shutdownCh:
359 return ErrSessionShutdown
360 case <-timer.C:
361 return ErrConnectionWriteTimeout
362 }
363}
364
365// send is a long-running goroutine that sends data
366func (s *Session) send() {
367 for {
368 select {
369 case ready := <-s.sendCh:
370 // Send a header if ready
371 if ready.Hdr != nil {
372 sent := 0
373 for sent < len(ready.Hdr) {
374 n, err := s.conn.Write(ready.Hdr[sent:])
375 if err != nil {
376 s.logger.Printf("[ERR] yamux: Failed to write header: %v", err)
377 asyncSendErr(ready.Err, err)
378 s.exitErr(err)
379 return
380 }
381 sent += n
382 }
383 }
384
385 // Send data from a body if given
386 if ready.Body != nil {
387 _, err := io.Copy(s.conn, ready.Body)
388 if err != nil {
389 s.logger.Printf("[ERR] yamux: Failed to write body: %v", err)
390 asyncSendErr(ready.Err, err)
391 s.exitErr(err)
392 return
393 }
394 }
395
396 // No error, successful send
397 asyncSendErr(ready.Err, nil)
398 case <-s.shutdownCh:
399 return
400 }
401 }
402}
403
404// recv is a long-running goroutine that accepts new data
405func (s *Session) recv() {
406 if err := s.recvLoop(); err != nil {
407 s.exitErr(err)
408 }
409}
410
411// recvLoop continues to receive data until a fatal error is encountered
412func (s *Session) recvLoop() error {
413 defer close(s.recvDoneCh)
414 hdr := header(make([]byte, headerSize))
415 var handler func(header) error
416 for {
417 // Read the header
418 if _, err := io.ReadFull(s.bufRead, hdr); err != nil {
419 if err != io.EOF && !strings.Contains(err.Error(), "closed") && !strings.Contains(err.Error(), "reset by peer") {
420 s.logger.Printf("[ERR] yamux: Failed to read header: %v", err)
421 }
422 return err
423 }
424
425 // Verify the version
426 if hdr.Version() != protoVersion {
427 s.logger.Printf("[ERR] yamux: Invalid protocol version: %d", hdr.Version())
428 return ErrInvalidVersion
429 }
430
431 // Switch on the type
432 switch hdr.MsgType() {
433 case typeData:
434 handler = s.handleStreamMessage
435 case typeWindowUpdate:
436 handler = s.handleStreamMessage
437 case typeGoAway:
438 handler = s.handleGoAway
439 case typePing:
440 handler = s.handlePing
441 default:
442 return ErrInvalidMsgType
443 }
444
445 // Invoke the handler
446 if err := handler(hdr); err != nil {
447 return err
448 }
449 }
450}
451
452// handleStreamMessage handles either a data or window update frame
453func (s *Session) handleStreamMessage(hdr header) error {
454 // Check for a new stream creation
455 id := hdr.StreamID()
456 flags := hdr.Flags()
457 if flags&flagSYN == flagSYN {
458 if err := s.incomingStream(id); err != nil {
459 return err
460 }
461 }
462
463 // Get the stream
464 s.streamLock.Lock()
465 stream := s.streams[id]
466 s.streamLock.Unlock()
467
468	// If we do not have a stream, likely we sent an RST
469 if stream == nil {
470 // Drain any data on the wire
471 if hdr.MsgType() == typeData && hdr.Length() > 0 {
472 s.logger.Printf("[WARN] yamux: Discarding data for stream: %d", id)
473 if _, err := io.CopyN(ioutil.Discard, s.bufRead, int64(hdr.Length())); err != nil {
474 s.logger.Printf("[ERR] yamux: Failed to discard data: %v", err)
475 return nil
476 }
477 } else {
478 s.logger.Printf("[WARN] yamux: frame for missing stream: %v", hdr)
479 }
480 return nil
481 }
482
483 // Check if this is a window update
484 if hdr.MsgType() == typeWindowUpdate {
485 if err := stream.incrSendWindow(hdr, flags); err != nil {
486 if sendErr := s.sendNoWait(s.goAway(goAwayProtoErr)); sendErr != nil {
487 s.logger.Printf("[WARN] yamux: failed to send go away: %v", sendErr)
488 }
489 return err
490 }
491 return nil
492 }
493
494 // Read the new data
495 if err := stream.readData(hdr, flags, s.bufRead); err != nil {
496 if sendErr := s.sendNoWait(s.goAway(goAwayProtoErr)); sendErr != nil {
497 s.logger.Printf("[WARN] yamux: failed to send go away: %v", sendErr)
498 }
499 return err
500 }
501 return nil
502}
503
504// handlePing is invoked for a typePing frame
505func (s *Session) handlePing(hdr header) error {
506 flags := hdr.Flags()
507 pingID := hdr.Length()
508
509	// Check if this is a query; respond in a separate goroutine so we
510	// don't block the receiving loop on the write.
511 if flags&flagSYN == flagSYN {
512 go func() {
513 hdr := header(make([]byte, headerSize))
514 hdr.encode(typePing, flagACK, 0, pingID)
515 if err := s.sendNoWait(hdr); err != nil {
516 s.logger.Printf("[WARN] yamux: failed to send ping reply: %v", err)
517 }
518 }()
519 return nil
520 }
521
522 // Handle a response
523 s.pingLock.Lock()
524 ch := s.pings[pingID]
525 if ch != nil {
526 delete(s.pings, pingID)
527 close(ch)
528 }
529 s.pingLock.Unlock()
530 return nil
531}
532
533// handleGoAway is invoked for a typeGoAway frame
534func (s *Session) handleGoAway(hdr header) error {
535 code := hdr.Length()
536 switch code {
537 case goAwayNormal:
538 atomic.SwapInt32(&s.remoteGoAway, 1)
539 case goAwayProtoErr:
540 s.logger.Printf("[ERR] yamux: received protocol error go away")
541 return fmt.Errorf("yamux protocol error")
542 case goAwayInternalErr:
543 s.logger.Printf("[ERR] yamux: received internal error go away")
544 return fmt.Errorf("remote yamux internal error")
545 default:
546 s.logger.Printf("[ERR] yamux: received unexpected go away")
547 return fmt.Errorf("unexpected go away received")
548 }
549 return nil
550}
551
552// incomingStream is used to create a new incoming stream
553func (s *Session) incomingStream(id uint32) error {
554 // Reject immediately if we are doing a go away
555 if atomic.LoadInt32(&s.localGoAway) == 1 {
556 hdr := header(make([]byte, headerSize))
557 hdr.encode(typeWindowUpdate, flagRST, id, 0)
558 return s.sendNoWait(hdr)
559 }
560
561 // Allocate a new stream
562 stream := newStream(s, id, streamSYNReceived)
563
564 s.streamLock.Lock()
565 defer s.streamLock.Unlock()
566
567 // Check if stream already exists
568 if _, ok := s.streams[id]; ok {
569 s.logger.Printf("[ERR] yamux: duplicate stream declared")
570 if sendErr := s.sendNoWait(s.goAway(goAwayProtoErr)); sendErr != nil {
571 s.logger.Printf("[WARN] yamux: failed to send go away: %v", sendErr)
572 }
573 return ErrDuplicateStream
574 }
575
576 // Register the stream
577 s.streams[id] = stream
578
579 // Check if we've exceeded the backlog
580 select {
581 case s.acceptCh <- stream:
582 return nil
583 default:
584 // Backlog exceeded! RST the stream
585 s.logger.Printf("[WARN] yamux: backlog exceeded, forcing connection reset")
586 delete(s.streams, id)
587 stream.sendHdr.encode(typeWindowUpdate, flagRST, id, 0)
588 return s.sendNoWait(stream.sendHdr)
589 }
590}
591
592// closeStream is used to close a stream once both sides have
593// issued a close. If there was an in-flight SYN and the stream
594// was not yet established, then this will give the credit back.
595func (s *Session) closeStream(id uint32) {
596 s.streamLock.Lock()
597 if _, ok := s.inflight[id]; ok {
598 select {
599 case <-s.synCh:
600 default:
601 s.logger.Printf("[ERR] yamux: SYN tracking out of sync")
602 }
603 }
604 delete(s.streams, id)
605 s.streamLock.Unlock()
606}
607
608// establishStream is used to mark a stream that was in the
609// SYN Sent state as established.
610func (s *Session) establishStream(id uint32) {
611 s.streamLock.Lock()
612 if _, ok := s.inflight[id]; ok {
613 delete(s.inflight, id)
614 } else {
615 s.logger.Printf("[ERR] yamux: established stream without inflight SYN (no tracking entry)")
616 }
617 select {
618 case <-s.synCh:
619 default:
620 s.logger.Printf("[ERR] yamux: established stream without inflight SYN (didn't have semaphore)")
621 }
622 s.streamLock.Unlock()
623}
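
A short sketch of the session-level operations above; sessionOps is a hypothetical helper (not part of the library) acting on an established session from yamux.Client or yamux.Server:

```go
package example

import (
	"log"

	"github.com/hashicorp/yamux"
)

// sessionOps measures RTT, stops accepting new streams, then shuts down.
func sessionOps(sess *yamux.Session) error {
	rtt, err := sess.Ping() // round trip via a typePing frame
	if err != nil {
		return err
	}
	log.Printf("yamux RTT: %s, open streams: %d", rtt, sess.NumStreams())

	if err := sess.GoAway(); err != nil { // sends goAwayNormal
		return err
	}
	return sess.Close() // force-closes any remaining streams
}
```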
diff --git a/vendor/github.com/hashicorp/yamux/spec.md b/vendor/github.com/hashicorp/yamux/spec.md
new file mode 100644
index 0000000..183d797
--- /dev/null
+++ b/vendor/github.com/hashicorp/yamux/spec.md
@@ -0,0 +1,140 @@
1# Specification
2
3This document details the internal specification of Yamux.
4It serves both as a guide for implementing Yamux and as a reference
5for building alternative interoperable libraries.
6
7# Framing
8
9Yamux uses a streaming connection underneath, but imposes a message
10framing so that it can be shared between many logical streams. Each
11frame contains a header like:
12
13* Version (8 bits)
14* Type (8 bits)
15* Flags (16 bits)
16* StreamID (32 bits)
17* Length (32 bits)
18
19This means that each header has a 12-byte overhead.
20All fields are encoded in network order (big endian).
21Each field is described below:
22
23## Version Field
24
25The version field is used for future backward compatibility. At the
26current time, the field is always set to 0, to indicate the initial
27version.
28
29## Type Field
30
31The type field is used to switch the frame message type. The following
32message types are supported:
33
34* 0x0 Data - Used to transmit data. May transmit zero length payloads
35 depending on the flags.
36
37* 0x1 Window Update - Used to update the sender's receive window size.
38 This is used to implement per-session flow control.
39
40* 0x2 Ping - Used to measure RTT. It can also be used to heartbeat
41 and do keep-alives over TCP.
42
43* 0x3 Go Away - Used to close a session.
44
45## Flag Field
46
47The flags field is used to provide additional information related
48to the message type. The following flags are supported:
49
50* 0x1 SYN - Signals the start of a new stream. May be sent with a data or
51 window update message. Also sent with a ping to indicate outbound.
52
53* 0x2 ACK - Acknowledges the start of a new stream. May be sent with a data
54 or window update message. Also sent with a ping to indicate response.
55
56* 0x4 FIN - Performs a half-close of a stream. May be sent with a data
57 message or window update.
58
59* 0x8 RST - Reset a stream immediately. May be sent with a data or
60 window update message.
61
62## StreamID Field
63
64The StreamID field is used to identify the logical stream the frame
65is addressing. The client side should use odd IDs, and the server even IDs.
66This prevents any collisions. Additionally, the 0 ID is reserved to represent
67the session.
68
69Both Ping and Go Away messages should always use the 0 StreamID.
70
71## Length Field
72
73The meaning of the length field depends on the message type:
74
75* Data - provides the number of data bytes following the header
76* Window update - provides a delta update to the window size
77* Ping - Contains an opaque value, echoed back
78* Go Away - Contains an error code
79
80# Message Flow
81
82There is no explicit connection setup, as Yamux relies on an underlying
83transport to be provided. However, there is a distinction between the client
84and server sides of the connection.
85
86## Opening a stream
87
88To open a stream, an initial data or window update frame is sent
89with a new StreamID. The SYN flag should be set to signal a new stream.
90
91The receiver must then reply with either a data or window update frame
92with the StreamID along with the ACK flag to accept the stream or with
93the RST flag to reject the stream.
94
95Because we are relying on the reliable stream underneath, the sender
96can begin sending data once the SYN flag is sent. The corresponding
97ACK does not need to be received. This is particularly well suited
98for an RPC system where a client wants to open a stream and immediately
99fire a request without waiting for the RTT of the ACK.
100
101This does introduce the possibility of a stream being rejected
102after data has already been sent. This is a slight semantic difference
103from TCP, where the connection cannot be refused after it is opened.
104Clients should be prepared to handle this by checking for an error
105that indicates a RST was received.
106
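
For an alternative implementation, the initial SYN frame can be constructed directly from the layout in the Framing section. A hedged sketch (stream 1 assumes the client side; a window update with a delta of 0 is the cheapest opener):

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	frame := make([]byte, 12)
	frame[0] = 0                                // Version: 0
	frame[1] = 0x1                              // Type: Window Update
	binary.BigEndian.PutUint16(frame[2:4], 0x1) // Flags: SYN
	binary.BigEndian.PutUint32(frame[4:8], 1)   // StreamID: 1 (client side, odd)
	binary.BigEndian.PutUint32(frame[8:12], 0)  // Length: window delta of 0
	fmt.Printf("% x\n", frame)
}
```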
107## Closing a stream
108
109To close a stream, either side sends a data or window update frame
110along with the FIN flag. This does a half-close indicating the sender
111will send no further data.
112
113Once both sides have half-closed the stream, the stream is fully closed.
114
115Alternatively, if an error occurs, the RST flag can be used to
116hard close a stream immediately.
117
118## Flow Control
119
120Yamux initially starts each stream with a 256KB window size.
121There is no window size for the session.
122
123To prevent the streams from stalling, window update frames should be
124sent regularly. Yamux can be configured to provide a larger limit for
125window sizes. Both sides assume the initial 256KB window, but can
126immediately send a window update as part of the SYN/ACK indicating a
127larger window.
128
129Both sides should track the number of bytes sent in Data frames
130 only, as only those bytes count against the window size.
131
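A worked sketch of this accounting (it mirrors, but does not mandate, the receive-side delta logic in stream.go above): the receiver debits its window as data arrives and re-credits it with a window update once at least half the configured maximum has been consumed.

```go
package main

import "fmt"

func main() {
	max := uint32(256 * 1024) // configured maximum window
	recvWindow := max

	recvWindow -= 200 * 1024 // 200KB of data arrives: debit the window

	// Re-credit once at least half the window is consumed; smaller
	// deltas are skipped to avoid chatty updates.
	if delta := max - recvWindow; delta >= max/2 {
		fmt.Printf("send Window Update with delta %d\n", delta)
		recvWindow += delta
	}
	fmt.Println(recvWindow == max) // true
}
```
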
132## Session termination
133
134When a session is being terminated, the Go Away message should
135be sent. The Length should be set to one of the following to
136provide an error code:
137
138* 0x0 Normal termination
139* 0x1 Protocol error
140* 0x2 Internal error
diff --git a/vendor/github.com/hashicorp/yamux/stream.go b/vendor/github.com/hashicorp/yamux/stream.go
new file mode 100644
index 0000000..d216e28
--- /dev/null
+++ b/vendor/github.com/hashicorp/yamux/stream.go
@@ -0,0 +1,457 @@
1package yamux
2
3import (
4 "bytes"
5 "io"
6 "sync"
7 "sync/atomic"
8 "time"
9)
10
11type streamState int
12
13const (
14 streamInit streamState = iota
15 streamSYNSent
16 streamSYNReceived
17 streamEstablished
18 streamLocalClose
19 streamRemoteClose
20 streamClosed
21 streamReset
22)
23
24// Stream is used to represent a logical stream
25// within a session.
26type Stream struct {
27 recvWindow uint32
28 sendWindow uint32
29
30 id uint32
31 session *Session
32
33 state streamState
34 stateLock sync.Mutex
35
36 recvBuf *bytes.Buffer
37 recvLock sync.Mutex
38
39 controlHdr header
40 controlErr chan error
41 controlHdrLock sync.Mutex
42
43 sendHdr header
44 sendErr chan error
45 sendLock sync.Mutex
46
47 recvNotifyCh chan struct{}
48 sendNotifyCh chan struct{}
49
50 readDeadline time.Time
51 writeDeadline time.Time
52}
53
54// newStream is used to construct a new stream within
55// a given session for an ID
56func newStream(session *Session, id uint32, state streamState) *Stream {
57 s := &Stream{
58 id: id,
59 session: session,
60 state: state,
61 controlHdr: header(make([]byte, headerSize)),
62 controlErr: make(chan error, 1),
63 sendHdr: header(make([]byte, headerSize)),
64 sendErr: make(chan error, 1),
65 recvWindow: initialStreamWindow,
66 sendWindow: initialStreamWindow,
67 recvNotifyCh: make(chan struct{}, 1),
68 sendNotifyCh: make(chan struct{}, 1),
69 }
70 return s
71}
72
73// Session returns the associated stream session
74func (s *Stream) Session() *Session {
75 return s.session
76}
77
78// StreamID returns the ID of this stream
79func (s *Stream) StreamID() uint32 {
80 return s.id
81}
82
83// Read is used to read from the stream
84func (s *Stream) Read(b []byte) (n int, err error) {
85 defer asyncNotify(s.recvNotifyCh)
86START:
87 s.stateLock.Lock()
88 switch s.state {
89 case streamLocalClose:
90 fallthrough
91 case streamRemoteClose:
92 fallthrough
93 case streamClosed:
94 s.recvLock.Lock()
95 if s.recvBuf == nil || s.recvBuf.Len() == 0 {
96 s.recvLock.Unlock()
97 s.stateLock.Unlock()
98 return 0, io.EOF
99 }
100 s.recvLock.Unlock()
101 case streamReset:
102 s.stateLock.Unlock()
103 return 0, ErrConnectionReset
104 }
105 s.stateLock.Unlock()
106
107 // If there is no data available, block
108 s.recvLock.Lock()
109 if s.recvBuf == nil || s.recvBuf.Len() == 0 {
110 s.recvLock.Unlock()
111 goto WAIT
112 }
113
114 // Read any bytes
115 n, _ = s.recvBuf.Read(b)
116 s.recvLock.Unlock()
117
118	// Potentially send a window update
119 err = s.sendWindowUpdate()
120 return n, err
121
122WAIT:
123 var timeout <-chan time.Time
124 var timer *time.Timer
125 if !s.readDeadline.IsZero() {
126 delay := s.readDeadline.Sub(time.Now())
127 timer = time.NewTimer(delay)
128 timeout = timer.C
129 }
130 select {
131 case <-s.recvNotifyCh:
132 if timer != nil {
133 timer.Stop()
134 }
135 goto START
136 case <-timeout:
137 return 0, ErrTimeout
138 }
139}
140
141// Write is used to write to the stream
142func (s *Stream) Write(b []byte) (n int, err error) {
143 s.sendLock.Lock()
144 defer s.sendLock.Unlock()
145 total := 0
146 for total < len(b) {
147 n, err := s.write(b[total:])
148 total += n
149 if err != nil {
150 return total, err
151 }
152 }
153 return total, nil
154}
155
156// write is used to write to the stream; it may return after
157// a short write.
158func (s *Stream) write(b []byte) (n int, err error) {
159 var flags uint16
160 var max uint32
161 var body io.Reader
162START:
163 s.stateLock.Lock()
164 switch s.state {
165 case streamLocalClose:
166 fallthrough
167 case streamClosed:
168 s.stateLock.Unlock()
169 return 0, ErrStreamClosed
170 case streamReset:
171 s.stateLock.Unlock()
172 return 0, ErrConnectionReset
173 }
174 s.stateLock.Unlock()
175
176 // If there is no data available, block
177 window := atomic.LoadUint32(&s.sendWindow)
178 if window == 0 {
179 goto WAIT
180 }
181
182 // Determine the flags if any
183 flags = s.sendFlags()
184
185 // Send up to our send window
186 max = min(window, uint32(len(b)))
187 body = bytes.NewReader(b[:max])
188
189 // Send the header
190 s.sendHdr.encode(typeData, flags, s.id, max)
191 if err := s.session.waitForSendErr(s.sendHdr, body, s.sendErr); err != nil {
192 return 0, err
193 }
194
195 // Reduce our send window
196 atomic.AddUint32(&s.sendWindow, ^uint32(max-1))
197
198 // Unlock
199 return int(max), err
200
201WAIT:
202 var timeout <-chan time.Time
203 if !s.writeDeadline.IsZero() {
204 delay := s.writeDeadline.Sub(time.Now())
205 timeout = time.After(delay)
206 }
207 select {
208 case <-s.sendNotifyCh:
209 goto START
210 case <-timeout:
211 return 0, ErrTimeout
212 }
213 return 0, nil
214}
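
One non-obvious line above is `atomic.AddUint32(&s.sendWindow, ^uint32(max-1))`: adding `^uint32(n-1)` is the subtraction idiom documented for sync/atomic, since in two's complement it equals adding -n. A standalone check:

```go
package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	w := uint32(256 * 1024)
	n := uint32(4096)
	atomic.AddUint32(&w, ^uint32(n-1)) // equivalent to w -= n
	fmt.Println(w == 256*1024-4096)    // true
}
```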
215
216// sendFlags determines any flags that are appropriate
217// based on the current stream state
218func (s *Stream) sendFlags() uint16 {
219 s.stateLock.Lock()
220 defer s.stateLock.Unlock()
221 var flags uint16
222 switch s.state {
223 case streamInit:
224 flags |= flagSYN
225 s.state = streamSYNSent
226 case streamSYNReceived:
227 flags |= flagACK
228 s.state = streamEstablished
229 }
230 return flags
231}
232
233// sendWindowUpdate potentially sends a window update enabling
234// further writes to take place. Must be invoked with the lock.
235func (s *Stream) sendWindowUpdate() error {
236 s.controlHdrLock.Lock()
237 defer s.controlHdrLock.Unlock()
238
239 // Determine the delta update
240 max := s.session.config.MaxStreamWindowSize
241 delta := max - atomic.LoadUint32(&s.recvWindow)
242
243 // Determine the flags if any
244 flags := s.sendFlags()
245
246 // Check if we can omit the update
247 if delta < (max/2) && flags == 0 {
248 return nil
249 }
250
251 // Update our window
252 atomic.AddUint32(&s.recvWindow, delta)
253
254 // Send the header
255 s.controlHdr.encode(typeWindowUpdate, flags, s.id, delta)
256 if err := s.session.waitForSendErr(s.controlHdr, nil, s.controlErr); err != nil {
257 return err
258 }
259 return nil
260}
261
262// sendClose is used to send a FIN
263func (s *Stream) sendClose() error {
264 s.controlHdrLock.Lock()
265 defer s.controlHdrLock.Unlock()
266
267 flags := s.sendFlags()
268 flags |= flagFIN
269 s.controlHdr.encode(typeWindowUpdate, flags, s.id, 0)
270 if err := s.session.waitForSendErr(s.controlHdr, nil, s.controlErr); err != nil {
271 return err
272 }
273 return nil
274}
275
276// Close is used to close the stream
277func (s *Stream) Close() error {
278 closeStream := false
279 s.stateLock.Lock()
280 switch s.state {
281 // Opened means we need to signal a close
282 case streamSYNSent:
283 fallthrough
284 case streamSYNReceived:
285 fallthrough
286 case streamEstablished:
287 s.state = streamLocalClose
288 goto SEND_CLOSE
289
290 case streamLocalClose:
291 case streamRemoteClose:
292 s.state = streamClosed
293 closeStream = true
294 goto SEND_CLOSE
295
296 case streamClosed:
297 case streamReset:
298 default:
299 panic("unhandled state")
300 }
301 s.stateLock.Unlock()
302 return nil
303SEND_CLOSE:
304 s.stateLock.Unlock()
305 s.sendClose()
306 s.notifyWaiting()
307 if closeStream {
308 s.session.closeStream(s.id)
309 }
310 return nil
311}
312
313// forceClose is used for when the session is exiting
314func (s *Stream) forceClose() {
315 s.stateLock.Lock()
316 s.state = streamClosed
317 s.stateLock.Unlock()
318 s.notifyWaiting()
319}
320
321// processFlags is used to update the state of the stream
322// based on set flags, if any. Lock must be held
323func (s *Stream) processFlags(flags uint16) error {
324 // Close the stream without holding the state lock
325 closeStream := false
326 defer func() {
327 if closeStream {
328 s.session.closeStream(s.id)
329 }
330 }()
331
332 s.stateLock.Lock()
333 defer s.stateLock.Unlock()
334 if flags&flagACK == flagACK {
335 if s.state == streamSYNSent {
336 s.state = streamEstablished
337 }
338 s.session.establishStream(s.id)
339 }
340 if flags&flagFIN == flagFIN {
341 switch s.state {
342 case streamSYNSent:
343 fallthrough
344 case streamSYNReceived:
345 fallthrough
346 case streamEstablished:
347 s.state = streamRemoteClose
348 s.notifyWaiting()
349 case streamLocalClose:
350 s.state = streamClosed
351 closeStream = true
352 s.notifyWaiting()
353 default:
354 s.session.logger.Printf("[ERR] yamux: unexpected FIN flag in state %d", s.state)
355 return ErrUnexpectedFlag
356 }
357 }
358 if flags&flagRST == flagRST {
359 s.state = streamReset
360 closeStream = true
361 s.notifyWaiting()
362 }
363 return nil
364}
365
366// notifyWaiting notifies all the waiting channels
367func (s *Stream) notifyWaiting() {
368 asyncNotify(s.recvNotifyCh)
369 asyncNotify(s.sendNotifyCh)
370}
371
372// incrSendWindow updates the size of our send window
373func (s *Stream) incrSendWindow(hdr header, flags uint16) error {
374 if err := s.processFlags(flags); err != nil {
375 return err
376 }
377
378 // Increase window, unblock a sender
379 atomic.AddUint32(&s.sendWindow, hdr.Length())
380 asyncNotify(s.sendNotifyCh)
381 return nil
382}
383
384// readData is used to handle a data frame
385func (s *Stream) readData(hdr header, flags uint16, conn io.Reader) error {
386 if err := s.processFlags(flags); err != nil {
387 return err
388 }
389
390 // Check that our recv window is not exceeded
391 length := hdr.Length()
392 if length == 0 {
393 return nil
394 }
395 if remain := atomic.LoadUint32(&s.recvWindow); length > remain {
396 s.session.logger.Printf("[ERR] yamux: receive window exceeded (stream: %d, remain: %d, recv: %d)", s.id, remain, length)
397 return ErrRecvWindowExceeded
398 }
399
400 // Wrap in a limited reader
401 conn = &io.LimitedReader{R: conn, N: int64(length)}
402
403 // Copy into buffer
404 s.recvLock.Lock()
405 if s.recvBuf == nil {
406 // Allocate the receive buffer just-in-time to fit the full data frame.
407 // This way we can read in the whole packet without further allocations.
408 s.recvBuf = bytes.NewBuffer(make([]byte, 0, length))
409 }
410 if _, err := io.Copy(s.recvBuf, conn); err != nil {
411 s.session.logger.Printf("[ERR] yamux: Failed to read stream data: %v", err)
412 s.recvLock.Unlock()
413 return err
414 }
415
416 // Decrement the receive window
417 atomic.AddUint32(&s.recvWindow, ^uint32(length-1))
418 s.recvLock.Unlock()
419
420 // Unblock any readers
421 asyncNotify(s.recvNotifyCh)
422 return nil
423}
424
425// SetDeadline sets the read and write deadlines
426func (s *Stream) SetDeadline(t time.Time) error {
427 if err := s.SetReadDeadline(t); err != nil {
428 return err
429 }
430 if err := s.SetWriteDeadline(t); err != nil {
431 return err
432 }
433 return nil
434}
435
436// SetReadDeadline sets the deadline for future Read calls.
437func (s *Stream) SetReadDeadline(t time.Time) error {
438 s.readDeadline = t
439 return nil
440}
441
442// SetWriteDeadline sets the deadline for future Write calls
443func (s *Stream) SetWriteDeadline(t time.Time) error {
444 s.writeDeadline = t
445 return nil
446}
447
448// Shrink is used to compact the buffers utilized.
449// This is useful when using Yamux in a connection pool to reduce
450// the idle memory utilization.
451func (s *Stream) Shrink() {
452 s.recvLock.Lock()
453 if s.recvBuf != nil && s.recvBuf.Len() == 0 {
454 s.recvBuf = nil
455 }
456 s.recvLock.Unlock()
457}
diff --git a/vendor/github.com/hashicorp/yamux/util.go b/vendor/github.com/hashicorp/yamux/util.go
new file mode 100644
index 0000000..5fe45af
--- /dev/null
+++ b/vendor/github.com/hashicorp/yamux/util.go
@@ -0,0 +1,28 @@
1package yamux
2
3// asyncSendErr is used to try an async send of an error
4func asyncSendErr(ch chan error, err error) {
5 if ch == nil {
6 return
7 }
8 select {
9 case ch <- err:
10 default:
11 }
12}
13
14// asyncNotify is used to signal a waiting goroutine
15func asyncNotify(ch chan struct{}) {
16 select {
17 case ch <- struct{}{}:
18 default:
19 }
20}
21
22// min computes the minimum of two values
23func min(a, b uint32) uint32 {
24 if a < b {
25 return a
26 }
27 return b
28}
diff --git a/vendor/github.com/jmespath/go-jmespath/LICENSE b/vendor/github.com/jmespath/go-jmespath/LICENSE
new file mode 100644
index 0000000..b03310a
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/LICENSE
@@ -0,0 +1,13 @@
1Copyright 2015 James Saryerwinnie
2
3Licensed under the Apache License, Version 2.0 (the "License");
4you may not use this file except in compliance with the License.
5You may obtain a copy of the License at
6
7 http://www.apache.org/licenses/LICENSE-2.0
8
9Unless required by applicable law or agreed to in writing, software
10distributed under the License is distributed on an "AS IS" BASIS,
11WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12See the License for the specific language governing permissions and
13limitations under the License.
diff --git a/vendor/github.com/jmespath/go-jmespath/Makefile b/vendor/github.com/jmespath/go-jmespath/Makefile
new file mode 100644
index 0000000..a828d28
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/Makefile
@@ -0,0 +1,44 @@
1
2CMD = jpgo
3
4help:
5 @echo "Please use \`make <target>' where <target> is one of"
6 @echo " test to run all the tests"
7 @echo " build to build the library and jp executable"
8 @echo " generate to run codegen"
9
10
11generate:
12 go generate ./...
13
14build:
15 rm -f $(CMD)
16 go build ./...
17 rm -f cmd/$(CMD)/$(CMD) && cd cmd/$(CMD)/ && go build ./...
18 mv cmd/$(CMD)/$(CMD) .
19
20test:
21 go test -v ./...
22
23check:
24 go vet ./...
25 @echo "golint ./..."
26 @lint=`golint ./...`; \
27 lint=`echo "$$lint" | grep -v "astnodetype_string.go" | grep -v "toktype_string.go"`; \
28 echo "$$lint"; \
29 if [ "$$lint" != "" ]; then exit 1; fi
30
31htmlc:
32 go test -coverprofile="/tmp/jpcov" && go tool cover -html="/tmp/jpcov" && unlink /tmp/jpcov
33
34buildfuzz:
35 go-fuzz-build github.com/jmespath/go-jmespath/fuzz
36
37fuzz: buildfuzz
38 go-fuzz -bin=./jmespath-fuzz.zip -workdir=fuzz/testdata
39
40bench:
41 go test -bench . -cpuprofile cpu.out
42
43pprof-cpu:
44 go tool pprof ./go-jmespath.test ./cpu.out
diff --git a/vendor/github.com/jmespath/go-jmespath/README.md b/vendor/github.com/jmespath/go-jmespath/README.md
new file mode 100644
index 0000000..187ef67
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/README.md
@@ -0,0 +1,7 @@
1# go-jmespath - A JMESPath implementation in Go
2
3[![Build Status](https://img.shields.io/travis/jmespath/go-jmespath.svg)](https://travis-ci.org/jmespath/go-jmespath)
4
5
6
7See http://jmespath.org for more info.
diff --git a/vendor/github.com/jmespath/go-jmespath/api.go b/vendor/github.com/jmespath/go-jmespath/api.go
new file mode 100644
index 0000000..9cfa988
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/api.go
@@ -0,0 +1,49 @@
1package jmespath
2
3import "strconv"
4
5// JMESPath is the representation of a compiled JMESPath query. A JMESPath is
6// safe for concurrent use by multiple goroutines.
7type JMESPath struct {
8 ast ASTNode
9 intr *treeInterpreter
10}
11
12// Compile parses a JMESPath expression and returns, if successful, a JMESPath
13// object that can be used to match against data.
14func Compile(expression string) (*JMESPath, error) {
15 parser := NewParser()
16 ast, err := parser.Parse(expression)
17 if err != nil {
18 return nil, err
19 }
20 jmespath := &JMESPath{ast: ast, intr: newInterpreter()}
21 return jmespath, nil
22}
23
24// MustCompile is like Compile but panics if the expression cannot be parsed.
25// It simplifies safe initialization of global variables holding compiled
26// JMESPaths.
27func MustCompile(expression string) *JMESPath {
28 jmespath, err := Compile(expression)
29 if err != nil {
30 panic(`jmespath: Compile(` + strconv.Quote(expression) + `): ` + err.Error())
31 }
32 return jmespath
33}
34
35// Search evaluates a JMESPath expression against input data and returns the result.
36func (jp *JMESPath) Search(data interface{}) (interface{}, error) {
37 return jp.intr.Execute(jp.ast, data)
38}
39
40// Search evaluates a JMESPath expression against input data and returns the result.
41func Search(expression string, data interface{}) (interface{}, error) {
42 intr := newInterpreter()
43 parser := NewParser()
44 ast, err := parser.Parse(expression)
45 if err != nil {
46 return nil, err
47 }
48 return intr.Execute(ast, data)
49}
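
A brief usage sketch for the API above; the JSON document and expressions are invented for illustration:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/jmespath/go-jmespath"
)

func main() {
	var data interface{}
	json.Unmarshal([]byte(`{"foo": {"bar": [1, 2, 3]}}`), &data)

	// One-shot evaluation.
	result, err := jmespath.Search("foo.bar[1]", data)
	if err != nil {
		panic(err)
	}
	fmt.Println(result) // 2

	// Compile once; a *JMESPath is safe for concurrent reuse.
	expr := jmespath.MustCompile("foo.bar[1]")
	result, _ = expr.Search(data)
	fmt.Println(result) // 2
}
```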
diff --git a/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go b/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go
new file mode 100644
index 0000000..1cd2d23
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go
@@ -0,0 +1,16 @@
1// generated by stringer -type astNodeType; DO NOT EDIT
2
3package jmespath
4
5import "fmt"
6
7const _astNodeType_name = "ASTEmptyASTComparatorASTCurrentNodeASTExpRefASTFunctionExpressionASTFieldASTFilterProjectionASTFlattenASTIdentityASTIndexASTIndexExpressionASTKeyValPairASTLiteralASTMultiSelectHashASTMultiSelectListASTOrExpressionASTAndExpressionASTNotExpressionASTPipeASTProjectionASTSubexpressionASTSliceASTValueProjection"
8
9var _astNodeType_index = [...]uint16{0, 8, 21, 35, 44, 65, 73, 92, 102, 113, 121, 139, 152, 162, 180, 198, 213, 229, 245, 252, 265, 281, 289, 307}
10
11func (i astNodeType) String() string {
12 if i < 0 || i >= astNodeType(len(_astNodeType_index)-1) {
13 return fmt.Sprintf("astNodeType(%d)", i)
14 }
15 return _astNodeType_name[_astNodeType_index[i]:_astNodeType_index[i+1]]
16}
diff --git a/vendor/github.com/jmespath/go-jmespath/functions.go b/vendor/github.com/jmespath/go-jmespath/functions.go
new file mode 100644
index 0000000..9b7cd89
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/functions.go
@@ -0,0 +1,842 @@
1package jmespath
2
3import (
4 "encoding/json"
5 "errors"
6 "fmt"
7 "math"
8 "reflect"
9 "sort"
10 "strconv"
11 "strings"
12 "unicode/utf8"
13)
14
15type jpFunction func(arguments []interface{}) (interface{}, error)
16
17type jpType string
18
19const (
20 jpUnknown jpType = "unknown"
21 jpNumber jpType = "number"
22 jpString jpType = "string"
23 jpArray jpType = "array"
24 jpObject jpType = "object"
25 jpArrayNumber jpType = "array[number]"
26 jpArrayString jpType = "array[string]"
27 jpExpref jpType = "expref"
28 jpAny jpType = "any"
29)
30
31type functionEntry struct {
32 name string
33 arguments []argSpec
34 handler jpFunction
35 hasExpRef bool
36}
37
38type argSpec struct {
39 types []jpType
40 variadic bool
41}
42
43type byExprString struct {
44 intr *treeInterpreter
45 node ASTNode
46 items []interface{}
47 hasError bool
48}
49
50func (a *byExprString) Len() int {
51 return len(a.items)
52}
53func (a *byExprString) Swap(i, j int) {
54 a.items[i], a.items[j] = a.items[j], a.items[i]
55}
56func (a *byExprString) Less(i, j int) bool {
57 first, err := a.intr.Execute(a.node, a.items[i])
58 if err != nil {
59 a.hasError = true
60 // Return a dummy value.
61 return true
62 }
63 ith, ok := first.(string)
64 if !ok {
65 a.hasError = true
66 return true
67 }
68 second, err := a.intr.Execute(a.node, a.items[j])
69 if err != nil {
70 a.hasError = true
71 // Return a dummy value.
72 return true
73 }
74 jth, ok := second.(string)
75 if !ok {
76 a.hasError = true
77 return true
78 }
79 return ith < jth
80}
81
82type byExprFloat struct {
83 intr *treeInterpreter
84 node ASTNode
85 items []interface{}
86 hasError bool
87}
88
89func (a *byExprFloat) Len() int {
90 return len(a.items)
91}
92func (a *byExprFloat) Swap(i, j int) {
93 a.items[i], a.items[j] = a.items[j], a.items[i]
94}
95func (a *byExprFloat) Less(i, j int) bool {
96 first, err := a.intr.Execute(a.node, a.items[i])
97 if err != nil {
98 a.hasError = true
99 // Return a dummy value.
100 return true
101 }
102 ith, ok := first.(float64)
103 if !ok {
104 a.hasError = true
105 return true
106 }
107 second, err := a.intr.Execute(a.node, a.items[j])
108 if err != nil {
109 a.hasError = true
110 // Return a dummy value.
111 return true
112 }
113 jth, ok := second.(float64)
114 if !ok {
115 a.hasError = true
116 return true
117 }
118 return ith < jth
119}
120
121type functionCaller struct {
122 functionTable map[string]functionEntry
123}
124
125func newFunctionCaller() *functionCaller {
126 caller := &functionCaller{}
127 caller.functionTable = map[string]functionEntry{
128 "length": {
129 name: "length",
130 arguments: []argSpec{
131 {types: []jpType{jpString, jpArray, jpObject}},
132 },
133 handler: jpfLength,
134 },
135 "starts_with": {
136 name: "starts_with",
137 arguments: []argSpec{
138 {types: []jpType{jpString}},
139 {types: []jpType{jpString}},
140 },
141 handler: jpfStartsWith,
142 },
143 "abs": {
144 name: "abs",
145 arguments: []argSpec{
146 {types: []jpType{jpNumber}},
147 },
148 handler: jpfAbs,
149 },
150 "avg": {
151 name: "avg",
152 arguments: []argSpec{
153 {types: []jpType{jpArrayNumber}},
154 },
155 handler: jpfAvg,
156 },
157 "ceil": {
158 name: "ceil",
159 arguments: []argSpec{
160 {types: []jpType{jpNumber}},
161 },
162 handler: jpfCeil,
163 },
164 "contains": {
165 name: "contains",
166 arguments: []argSpec{
167 {types: []jpType{jpArray, jpString}},
168 {types: []jpType{jpAny}},
169 },
170 handler: jpfContains,
171 },
172 "ends_with": {
173 name: "ends_with",
174 arguments: []argSpec{
175 {types: []jpType{jpString}},
176 {types: []jpType{jpString}},
177 },
178 handler: jpfEndsWith,
179 },
180 "floor": {
181 name: "floor",
182 arguments: []argSpec{
183 {types: []jpType{jpNumber}},
184 },
185 handler: jpfFloor,
186 },
187 "map": {
188			name: "map",
189 arguments: []argSpec{
190 {types: []jpType{jpExpref}},
191 {types: []jpType{jpArray}},
192 },
193 handler: jpfMap,
194 hasExpRef: true,
195 },
196 "max": {
197 name: "max",
198 arguments: []argSpec{
199 {types: []jpType{jpArrayNumber, jpArrayString}},
200 },
201 handler: jpfMax,
202 },
203 "merge": {
204 name: "merge",
205 arguments: []argSpec{
206 {types: []jpType{jpObject}, variadic: true},
207 },
208 handler: jpfMerge,
209 },
210 "max_by": {
211 name: "max_by",
212 arguments: []argSpec{
213 {types: []jpType{jpArray}},
214 {types: []jpType{jpExpref}},
215 },
216 handler: jpfMaxBy,
217 hasExpRef: true,
218 },
219 "sum": {
220 name: "sum",
221 arguments: []argSpec{
222 {types: []jpType{jpArrayNumber}},
223 },
224 handler: jpfSum,
225 },
226 "min": {
227 name: "min",
228 arguments: []argSpec{
229 {types: []jpType{jpArrayNumber, jpArrayString}},
230 },
231 handler: jpfMin,
232 },
233 "min_by": {
234 name: "min_by",
235 arguments: []argSpec{
236 {types: []jpType{jpArray}},
237 {types: []jpType{jpExpref}},
238 },
239 handler: jpfMinBy,
240 hasExpRef: true,
241 },
242 "type": {
243 name: "type",
244 arguments: []argSpec{
245 {types: []jpType{jpAny}},
246 },
247 handler: jpfType,
248 },
249 "keys": {
250 name: "keys",
251 arguments: []argSpec{
252 {types: []jpType{jpObject}},
253 },
254 handler: jpfKeys,
255 },
256 "values": {
257 name: "values",
258 arguments: []argSpec{
259 {types: []jpType{jpObject}},
260 },
261 handler: jpfValues,
262 },
263 "sort": {
264 name: "sort",
265 arguments: []argSpec{
266 {types: []jpType{jpArrayString, jpArrayNumber}},
267 },
268 handler: jpfSort,
269 },
270 "sort_by": {
271 name: "sort_by",
272 arguments: []argSpec{
273 {types: []jpType{jpArray}},
274 {types: []jpType{jpExpref}},
275 },
276 handler: jpfSortBy,
277 hasExpRef: true,
278 },
279 "join": {
280 name: "join",
281 arguments: []argSpec{
282 {types: []jpType{jpString}},
283 {types: []jpType{jpArrayString}},
284 },
285 handler: jpfJoin,
286 },
287 "reverse": {
288 name: "reverse",
289 arguments: []argSpec{
290 {types: []jpType{jpArray, jpString}},
291 },
292 handler: jpfReverse,
293 },
294 "to_array": {
295 name: "to_array",
296 arguments: []argSpec{
297 {types: []jpType{jpAny}},
298 },
299 handler: jpfToArray,
300 },
301 "to_string": {
302 name: "to_string",
303 arguments: []argSpec{
304 {types: []jpType{jpAny}},
305 },
306 handler: jpfToString,
307 },
308 "to_number": {
309 name: "to_number",
310 arguments: []argSpec{
311 {types: []jpType{jpAny}},
312 },
313 handler: jpfToNumber,
314 },
315 "not_null": {
316 name: "not_null",
317 arguments: []argSpec{
318 {types: []jpType{jpAny}, variadic: true},
319 },
320 handler: jpfNotNull,
321 },
322 }
323 return caller
324}
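
These entries back the JMESPath built-in functions, which are reached through ordinary expressions rather than called directly; a small sketch (sample data invented):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/jmespath/go-jmespath"
)

func main() {
	var data interface{}
	json.Unmarshal([]byte(`{"people": [{"age": 30}, {"age": 25}]}`), &data)

	// length() and max_by() resolve through the function table above;
	// &age is an expression reference (jpExpref).
	n, _ := jmespath.Search("length(people)", data)
	oldest, _ := jmespath.Search("max_by(people, &age).age", data)
	fmt.Println(n, oldest) // 2 30
}
```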
325
326func (e *functionEntry) resolveArgs(arguments []interface{}) ([]interface{}, error) {
327 if len(e.arguments) == 0 {
328 return arguments, nil
329 }
330 if !e.arguments[len(e.arguments)-1].variadic {
331 if len(e.arguments) != len(arguments) {
332 return nil, errors.New("incorrect number of args")
333 }
334 for i, spec := range e.arguments {
335 userArg := arguments[i]
336 err := spec.typeCheck(userArg)
337 if err != nil {
338 return nil, err
339 }
340 }
341 return arguments, nil
342 }
343 if len(arguments) < len(e.arguments) {
344		return nil, errors.New("invalid arity")
345 }
346 return arguments, nil
347}
348
349func (a *argSpec) typeCheck(arg interface{}) error {
350 for _, t := range a.types {
351 switch t {
352 case jpNumber:
353 if _, ok := arg.(float64); ok {
354 return nil
355 }
356 case jpString:
357 if _, ok := arg.(string); ok {
358 return nil
359 }
360 case jpArray:
361 if isSliceType(arg) {
362 return nil
363 }
364 case jpObject:
365 if _, ok := arg.(map[string]interface{}); ok {
366 return nil
367 }
368 case jpArrayNumber:
369 if _, ok := toArrayNum(arg); ok {
370 return nil
371 }
372 case jpArrayString:
373 if _, ok := toArrayStr(arg); ok {
374 return nil
375 }
376 case jpAny:
377 return nil
378 case jpExpref:
379 if _, ok := arg.(expRef); ok {
380 return nil
381 }
382 }
383 }
384	return fmt.Errorf("invalid type for: %v, expected: %#v", arg, a.types)
385}
386
387func (f *functionCaller) CallFunction(name string, arguments []interface{}, intr *treeInterpreter) (interface{}, error) {
388 entry, ok := f.functionTable[name]
389 if !ok {
390 return nil, errors.New("unknown function: " + name)
391 }
392 resolvedArgs, err := entry.resolveArgs(arguments)
393 if err != nil {
394 return nil, err
395 }
396 if entry.hasExpRef {
397 var extra []interface{}
398 extra = append(extra, intr)
399 resolvedArgs = append(extra, resolvedArgs...)
400 }
401 return entry.handler(resolvedArgs)
402}
403
404func jpfAbs(arguments []interface{}) (interface{}, error) {
405 num := arguments[0].(float64)
406 return math.Abs(num), nil
407}
408
409func jpfLength(arguments []interface{}) (interface{}, error) {
410 arg := arguments[0]
411 if c, ok := arg.(string); ok {
412 return float64(utf8.RuneCountInString(c)), nil
413 } else if isSliceType(arg) {
414 v := reflect.ValueOf(arg)
415 return float64(v.Len()), nil
416 } else if c, ok := arg.(map[string]interface{}); ok {
417 return float64(len(c)), nil
418 }
419 return nil, errors.New("could not compute length()")
420}
421
422func jpfStartsWith(arguments []interface{}) (interface{}, error) {
423 search := arguments[0].(string)
424 prefix := arguments[1].(string)
425 return strings.HasPrefix(search, prefix), nil
426}
427
428func jpfAvg(arguments []interface{}) (interface{}, error) {
429	// We've already type-checked the value so we can safely use
430 // type assertions.
431 args := arguments[0].([]interface{})
432 length := float64(len(args))
433 numerator := 0.0
434 for _, n := range args {
435 numerator += n.(float64)
436 }
437 return numerator / length, nil
438}
439func jpfCeil(arguments []interface{}) (interface{}, error) {
440 val := arguments[0].(float64)
441 return math.Ceil(val), nil
442}
443func jpfContains(arguments []interface{}) (interface{}, error) {
444 search := arguments[0]
445 el := arguments[1]
446 if searchStr, ok := search.(string); ok {
447 if elStr, ok := el.(string); ok {
448			return strings.Contains(searchStr, elStr), nil
449 }
450 return false, nil
451 }
452 // Otherwise this is a generic contains for []interface{}
453 general := search.([]interface{})
454 for _, item := range general {
455 if item == el {
456 return true, nil
457 }
458 }
459 return false, nil
460}
461func jpfEndsWith(arguments []interface{}) (interface{}, error) {
462 search := arguments[0].(string)
463 suffix := arguments[1].(string)
464 return strings.HasSuffix(search, suffix), nil
465}
466func jpfFloor(arguments []interface{}) (interface{}, error) {
467 val := arguments[0].(float64)
468 return math.Floor(val), nil
469}
470func jpfMap(arguments []interface{}) (interface{}, error) {
471 intr := arguments[0].(*treeInterpreter)
472 exp := arguments[1].(expRef)
473 node := exp.ref
474 arr := arguments[2].([]interface{})
475 mapped := make([]interface{}, 0, len(arr))
476 for _, value := range arr {
477 current, err := intr.Execute(node, value)
478 if err != nil {
479 return nil, err
480 }
481 mapped = append(mapped, current)
482 }
483 return mapped, nil
484}
485func jpfMax(arguments []interface{}) (interface{}, error) {
486 if items, ok := toArrayNum(arguments[0]); ok {
487 if len(items) == 0 {
488 return nil, nil
489 }
490 if len(items) == 1 {
491 return items[0], nil
492 }
493 best := items[0]
494 for _, item := range items[1:] {
495 if item > best {
496 best = item
497 }
498 }
499 return best, nil
500 }
501 // Otherwise we're dealing with a max() of strings.
502 items, _ := toArrayStr(arguments[0])
503 if len(items) == 0 {
504 return nil, nil
505 }
506 if len(items) == 1 {
507 return items[0], nil
508 }
509 best := items[0]
510 for _, item := range items[1:] {
511 if item > best {
512 best = item
513 }
514 }
515 return best, nil
516}
517func jpfMerge(arguments []interface{}) (interface{}, error) {
518 final := make(map[string]interface{})
519 for _, m := range arguments {
520 mapped := m.(map[string]interface{})
521 for key, value := range mapped {
522 final[key] = value
523 }
524 }
525 return final, nil
526}
527func jpfMaxBy(arguments []interface{}) (interface{}, error) {
528 intr := arguments[0].(*treeInterpreter)
529 arr := arguments[1].([]interface{})
530 exp := arguments[2].(expRef)
531 node := exp.ref
532 if len(arr) == 0 {
533 return nil, nil
534 } else if len(arr) == 1 {
535 return arr[0], nil
536 }
537 start, err := intr.Execute(node, arr[0])
538 if err != nil {
539 return nil, err
540 }
541 switch t := start.(type) {
542 case float64:
543 bestVal := t
544 bestItem := arr[0]
545 for _, item := range arr[1:] {
546 result, err := intr.Execute(node, item)
547 if err != nil {
548 return nil, err
549 }
550 current, ok := result.(float64)
551 if !ok {
552 return nil, errors.New("invalid type, must be number")
553 }
554 if current > bestVal {
555 bestVal = current
556 bestItem = item
557 }
558 }
559 return bestItem, nil
560 case string:
561 bestVal := t
562 bestItem := arr[0]
563 for _, item := range arr[1:] {
564 result, err := intr.Execute(node, item)
565 if err != nil {
566 return nil, err
567 }
568 current, ok := result.(string)
569 if !ok {
570 return nil, errors.New("invalid type, must be string")
571 }
572 if current > bestVal {
573 bestVal = current
574 bestItem = item
575 }
576 }
577 return bestItem, nil
578 default:
579		return nil, errors.New("invalid type, must be number or string")
580 }
581}
582func jpfSum(arguments []interface{}) (interface{}, error) {
583 items, _ := toArrayNum(arguments[0])
584 sum := 0.0
585 for _, item := range items {
586 sum += item
587 }
588 return sum, nil
589}
590
591func jpfMin(arguments []interface{}) (interface{}, error) {
592 if items, ok := toArrayNum(arguments[0]); ok {
593 if len(items) == 0 {
594 return nil, nil
595 }
596 if len(items) == 1 {
597 return items[0], nil
598 }
599 best := items[0]
600 for _, item := range items[1:] {
601 if item < best {
602 best = item
603 }
604 }
605 return best, nil
606 }
607 items, _ := toArrayStr(arguments[0])
608 if len(items) == 0 {
609 return nil, nil
610 }
611 if len(items) == 1 {
612 return items[0], nil
613 }
614 best := items[0]
615 for _, item := range items[1:] {
616 if item < best {
617 best = item
618 }
619 }
620 return best, nil
621}
622
623func jpfMinBy(arguments []interface{}) (interface{}, error) {
624 intr := arguments[0].(*treeInterpreter)
625 arr := arguments[1].([]interface{})
626 exp := arguments[2].(expRef)
627 node := exp.ref
628 if len(arr) == 0 {
629 return nil, nil
630 } else if len(arr) == 1 {
631 return arr[0], nil
632 }
633 start, err := intr.Execute(node, arr[0])
634 if err != nil {
635 return nil, err
636 }
637 if t, ok := start.(float64); ok {
638 bestVal := t
639 bestItem := arr[0]
640 for _, item := range arr[1:] {
641 result, err := intr.Execute(node, item)
642 if err != nil {
643 return nil, err
644 }
645 current, ok := result.(float64)
646 if !ok {
647 return nil, errors.New("invalid type, must be number")
648 }
649 if current < bestVal {
650 bestVal = current
651 bestItem = item
652 }
653 }
654 return bestItem, nil
655 } else if t, ok := start.(string); ok {
656 bestVal := t
657 bestItem := arr[0]
658 for _, item := range arr[1:] {
659 result, err := intr.Execute(node, item)
660 if err != nil {
661 return nil, err
662 }
663 current, ok := result.(string)
664 if !ok {
665 return nil, errors.New("invalid type, must be string")
666 }
667 if current < bestVal {
668 bestVal = current
669 bestItem = item
670 }
671 }
672 return bestItem, nil
673 } else {
674		return nil, errors.New("invalid type, must be number or string")
675 }
676}
677func jpfType(arguments []interface{}) (interface{}, error) {
678 arg := arguments[0]
679 if _, ok := arg.(float64); ok {
680 return "number", nil
681 }
682 if _, ok := arg.(string); ok {
683 return "string", nil
684 }
685 if _, ok := arg.([]interface{}); ok {
686 return "array", nil
687 }
688 if _, ok := arg.(map[string]interface{}); ok {
689 return "object", nil
690 }
691 if arg == nil {
692 return "null", nil
693 }
694 if arg == true || arg == false {
695 return "boolean", nil
696 }
697 return nil, errors.New("unknown type")
698}
699func jpfKeys(arguments []interface{}) (interface{}, error) {
700 arg := arguments[0].(map[string]interface{})
701 collected := make([]interface{}, 0, len(arg))
702 for key := range arg {
703 collected = append(collected, key)
704 }
705 return collected, nil
706}
707func jpfValues(arguments []interface{}) (interface{}, error) {
708 arg := arguments[0].(map[string]interface{})
709 collected := make([]interface{}, 0, len(arg))
710 for _, value := range arg {
711 collected = append(collected, value)
712 }
713 return collected, nil
714}
715func jpfSort(arguments []interface{}) (interface{}, error) {
716 if items, ok := toArrayNum(arguments[0]); ok {
717 d := sort.Float64Slice(items)
718 sort.Stable(d)
719 final := make([]interface{}, len(d))
720 for i, val := range d {
721 final[i] = val
722 }
723 return final, nil
724 }
725 // Otherwise we're dealing with sort()'ing strings.
726 items, _ := toArrayStr(arguments[0])
727 d := sort.StringSlice(items)
728 sort.Stable(d)
729 final := make([]interface{}, len(d))
730 for i, val := range d {
731 final[i] = val
732 }
733 return final, nil
734}
735func jpfSortBy(arguments []interface{}) (interface{}, error) {
736 intr := arguments[0].(*treeInterpreter)
737 arr := arguments[1].([]interface{})
738 exp := arguments[2].(expRef)
739 node := exp.ref
740 if len(arr) == 0 {
741 return arr, nil
742 } else if len(arr) == 1 {
743 return arr, nil
744 }
745 start, err := intr.Execute(node, arr[0])
746 if err != nil {
747 return nil, err
748 }
749 if _, ok := start.(float64); ok {
750 sortable := &byExprFloat{intr, node, arr, false}
751 sort.Stable(sortable)
752 if sortable.hasError {
753 return nil, errors.New("error in sort_by comparison")
754 }
755 return arr, nil
756 } else if _, ok := start.(string); ok {
757 sortable := &byExprString{intr, node, arr, false}
758 sort.Stable(sortable)
759 if sortable.hasError {
760 return nil, errors.New("error in sort_by comparison")
761 }
762 return arr, nil
763 } else {
764		return nil, errors.New("invalid type, must be number or string")
765 }
766}
767func jpfJoin(arguments []interface{}) (interface{}, error) {
768 sep := arguments[0].(string)
769 // We can't just do arguments[1].([]string), we have to
770 // manually convert each item to a string.
771 arrayStr := []string{}
772 for _, item := range arguments[1].([]interface{}) {
773 arrayStr = append(arrayStr, item.(string))
774 }
775 return strings.Join(arrayStr, sep), nil
776}
777func jpfReverse(arguments []interface{}) (interface{}, error) {
778 if s, ok := arguments[0].(string); ok {
779 r := []rune(s)
780 for i, j := 0, len(r)-1; i < len(r)/2; i, j = i+1, j-1 {
781 r[i], r[j] = r[j], r[i]
782 }
783 return string(r), nil
784 }
785 items := arguments[0].([]interface{})
786 length := len(items)
787 reversed := make([]interface{}, length)
788 for i, item := range items {
789 reversed[length-(i+1)] = item
790 }
791 return reversed, nil
792}
793func jpfToArray(arguments []interface{}) (interface{}, error) {
794 if _, ok := arguments[0].([]interface{}); ok {
795 return arguments[0], nil
796 }
797 return arguments[:1:1], nil
798}
799func jpfToString(arguments []interface{}) (interface{}, error) {
800 if v, ok := arguments[0].(string); ok {
801 return v, nil
802 }
803 result, err := json.Marshal(arguments[0])
804 if err != nil {
805 return nil, err
806 }
807 return string(result), nil
808}
809func jpfToNumber(arguments []interface{}) (interface{}, error) {
810 arg := arguments[0]
811 if v, ok := arg.(float64); ok {
812 return v, nil
813 }
814 if v, ok := arg.(string); ok {
815 conv, err := strconv.ParseFloat(v, 64)
816 if err != nil {
817 return nil, nil
818 }
819 return conv, nil
820 }
821 if _, ok := arg.([]interface{}); ok {
822 return nil, nil
823 }
824 if _, ok := arg.(map[string]interface{}); ok {
825 return nil, nil
826 }
827 if arg == nil {
828 return nil, nil
829 }
830 if arg == true || arg == false {
831 return nil, nil
832 }
833 return nil, errors.New("unknown type")
834}
835func jpfNotNull(arguments []interface{}) (interface{}, error) {
836 for _, arg := range arguments {
837 if arg != nil {
838 return arg, nil
839 }
840 }
841 return nil, nil
842}
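The expression-reference functions above (jpfMaxBy, jpfMinBy, jpfSortBy) all follow the same shape: pull the interpreter and an expRef out of the argument list, then evaluate the referenced expression against every element. A minimal sketch of how they surface through the library's exported Search helper (defined in this library's api.go, which is not part of this diff):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/jmespath/go-jmespath"
)

func main() {
	var data interface{}
	// Hypothetical input, chosen to exercise the expRef functions above.
	input := `{"people": [
		{"name": "carol", "age": 35},
		{"name": "alice", "age": 30},
		{"name": "bob",   "age": 25}
	]}`
	if err := json.Unmarshal([]byte(input), &data); err != nil {
		panic(err)
	}

	// max_by evaluates the &age expression reference against each element
	// and keeps the element with the largest result (jpfMaxBy above).
	oldest, _ := jmespath.Search("max_by(people, &age).name", data)
	fmt.Println(oldest) // carol

	// sort_by orders the array by the keyed value (jpfSortBy above).
	names, _ := jmespath.Search("sort_by(people, &age)[].name", data)
	fmt.Println(names) // [bob alice carol]
}
```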
diff --git a/vendor/github.com/jmespath/go-jmespath/interpreter.go b/vendor/github.com/jmespath/go-jmespath/interpreter.go
new file mode 100644
index 0000000..13c7460
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/interpreter.go
@@ -0,0 +1,418 @@
1package jmespath
2
3import (
4 "errors"
5 "reflect"
6 "unicode"
7 "unicode/utf8"
8)
9
10/* This is a tree-based interpreter. It walks the AST and directly
11 interprets the AST to search through a JSON document.
12*/
13
14type treeInterpreter struct {
15 fCall *functionCaller
16}
17
18func newInterpreter() *treeInterpreter {
19 interpreter := treeInterpreter{}
20 interpreter.fCall = newFunctionCaller()
21 return &interpreter
22}
23
24type expRef struct {
25 ref ASTNode
26}
27
28// Execute takes an ASTNode and input data and interprets the AST directly.
29// It will produce the result of applying the JMESPath expression associated
30// with the ASTNode to the input data "value".
31func (intr *treeInterpreter) Execute(node ASTNode, value interface{}) (interface{}, error) {
32 switch node.nodeType {
33 case ASTComparator:
34 left, err := intr.Execute(node.children[0], value)
35 if err != nil {
36 return nil, err
37 }
38 right, err := intr.Execute(node.children[1], value)
39 if err != nil {
40 return nil, err
41 }
42 switch node.value {
43 case tEQ:
44 return objsEqual(left, right), nil
45 case tNE:
46 return !objsEqual(left, right), nil
47 }
48 leftNum, ok := left.(float64)
49 if !ok {
50 return nil, nil
51 }
52 rightNum, ok := right.(float64)
53 if !ok {
54 return nil, nil
55 }
56 switch node.value {
57 case tGT:
58 return leftNum > rightNum, nil
59 case tGTE:
60 return leftNum >= rightNum, nil
61 case tLT:
62 return leftNum < rightNum, nil
63 case tLTE:
64 return leftNum <= rightNum, nil
65 }
66 case ASTExpRef:
67 return expRef{ref: node.children[0]}, nil
68 case ASTFunctionExpression:
69 resolvedArgs := []interface{}{}
70 for _, arg := range node.children {
71 current, err := intr.Execute(arg, value)
72 if err != nil {
73 return nil, err
74 }
75 resolvedArgs = append(resolvedArgs, current)
76 }
77 return intr.fCall.CallFunction(node.value.(string), resolvedArgs, intr)
78 case ASTField:
79 if m, ok := value.(map[string]interface{}); ok {
80 key := node.value.(string)
81 return m[key], nil
82 }
83 return intr.fieldFromStruct(node.value.(string), value)
84 case ASTFilterProjection:
85 left, err := intr.Execute(node.children[0], value)
86 if err != nil {
87 return nil, nil
88 }
89 sliceType, ok := left.([]interface{})
90 if !ok {
91 if isSliceType(left) {
92 return intr.filterProjectionWithReflection(node, left)
93 }
94 return nil, nil
95 }
96 compareNode := node.children[2]
97 collected := []interface{}{}
98 for _, element := range sliceType {
99 result, err := intr.Execute(compareNode, element)
100 if err != nil {
101 return nil, err
102 }
103 if !isFalse(result) {
104 current, err := intr.Execute(node.children[1], element)
105 if err != nil {
106 return nil, err
107 }
108 if current != nil {
109 collected = append(collected, current)
110 }
111 }
112 }
113 return collected, nil
114 case ASTFlatten:
115 left, err := intr.Execute(node.children[0], value)
116 if err != nil {
117 return nil, nil
118 }
119 sliceType, ok := left.([]interface{})
120 if !ok {
121 // If we can't type convert to []interface{}, there's
122 // a chance this could still work via reflection if we're
123 // dealing with user provided types.
124 if isSliceType(left) {
125 return intr.flattenWithReflection(left)
126 }
127 return nil, nil
128 }
129 flattened := []interface{}{}
130 for _, element := range sliceType {
131 if elementSlice, ok := element.([]interface{}); ok {
132 flattened = append(flattened, elementSlice...)
133 } else if isSliceType(element) {
134 reflectFlat := []interface{}{}
135 v := reflect.ValueOf(element)
136 for i := 0; i < v.Len(); i++ {
137 reflectFlat = append(reflectFlat, v.Index(i).Interface())
138 }
139 flattened = append(flattened, reflectFlat...)
140 } else {
141 flattened = append(flattened, element)
142 }
143 }
144 return flattened, nil
145 case ASTIdentity, ASTCurrentNode:
146 return value, nil
147 case ASTIndex:
148 if sliceType, ok := value.([]interface{}); ok {
149 index := node.value.(int)
150 if index < 0 {
151 index += len(sliceType)
152 }
153 if index < len(sliceType) && index >= 0 {
154 return sliceType[index], nil
155 }
156 return nil, nil
157 }
158 // Otherwise try via reflection.
159 rv := reflect.ValueOf(value)
160 if rv.Kind() == reflect.Slice {
161 index := node.value.(int)
162 if index < 0 {
163 index += rv.Len()
164 }
165 if index < rv.Len() && index >= 0 {
166 v := rv.Index(index)
167 return v.Interface(), nil
168 }
169 }
170 return nil, nil
171 case ASTKeyValPair:
172 return intr.Execute(node.children[0], value)
173 case ASTLiteral:
174 return node.value, nil
175 case ASTMultiSelectHash:
176 if value == nil {
177 return nil, nil
178 }
179 collected := make(map[string]interface{})
180 for _, child := range node.children {
181 current, err := intr.Execute(child, value)
182 if err != nil {
183 return nil, err
184 }
185 key := child.value.(string)
186 collected[key] = current
187 }
188 return collected, nil
189 case ASTMultiSelectList:
190 if value == nil {
191 return nil, nil
192 }
193 collected := []interface{}{}
194 for _, child := range node.children {
195 current, err := intr.Execute(child, value)
196 if err != nil {
197 return nil, err
198 }
199 collected = append(collected, current)
200 }
201 return collected, nil
202 case ASTOrExpression:
203 matched, err := intr.Execute(node.children[0], value)
204 if err != nil {
205 return nil, err
206 }
207 if isFalse(matched) {
208 matched, err = intr.Execute(node.children[1], value)
209 if err != nil {
210 return nil, err
211 }
212 }
213 return matched, nil
214 case ASTAndExpression:
215 matched, err := intr.Execute(node.children[0], value)
216 if err != nil {
217 return nil, err
218 }
219 if isFalse(matched) {
220 return matched, nil
221 }
222 return intr.Execute(node.children[1], value)
223 case ASTNotExpression:
224 matched, err := intr.Execute(node.children[0], value)
225 if err != nil {
226 return nil, err
227 }
228 if isFalse(matched) {
229 return true, nil
230 }
231 return false, nil
232 case ASTPipe:
233 result := value
234 var err error
235 for _, child := range node.children {
236 result, err = intr.Execute(child, result)
237 if err != nil {
238 return nil, err
239 }
240 }
241 return result, nil
242 case ASTProjection:
243 left, err := intr.Execute(node.children[0], value)
244 if err != nil {
245 return nil, err
246 }
247 sliceType, ok := left.([]interface{})
248 if !ok {
249 if isSliceType(left) {
250 return intr.projectWithReflection(node, left)
251 }
252 return nil, nil
253 }
254 collected := []interface{}{}
255 var current interface{}
256 for _, element := range sliceType {
257 current, err = intr.Execute(node.children[1], element)
258 if err != nil {
259 return nil, err
260 }
261 if current != nil {
262 collected = append(collected, current)
263 }
264 }
265 return collected, nil
266 case ASTSubexpression, ASTIndexExpression:
267 left, err := intr.Execute(node.children[0], value)
268 if err != nil {
269 return nil, err
270 }
271 return intr.Execute(node.children[1], left)
272 case ASTSlice:
273 sliceType, ok := value.([]interface{})
274 if !ok {
275 if isSliceType(value) {
276 return intr.sliceWithReflection(node, value)
277 }
278 return nil, nil
279 }
280 parts := node.value.([]*int)
281 sliceParams := make([]sliceParam, 3)
282 for i, part := range parts {
283 if part != nil {
284 sliceParams[i].Specified = true
285 sliceParams[i].N = *part
286 }
287 }
288 return slice(sliceType, sliceParams)
289 case ASTValueProjection:
290 left, err := intr.Execute(node.children[0], value)
291 if err != nil {
292 return nil, nil
293 }
294 mapType, ok := left.(map[string]interface{})
295 if !ok {
296 return nil, nil
297 }
298		values := make([]interface{}, 0, len(mapType))
299 for _, value := range mapType {
300 values = append(values, value)
301 }
302 collected := []interface{}{}
303 for _, element := range values {
304 current, err := intr.Execute(node.children[1], element)
305 if err != nil {
306 return nil, err
307 }
308 if current != nil {
309 collected = append(collected, current)
310 }
311 }
312 return collected, nil
313 }
314 return nil, errors.New("Unknown AST node: " + node.nodeType.String())
315}
316
317func (intr *treeInterpreter) fieldFromStruct(key string, value interface{}) (interface{}, error) {
318 rv := reflect.ValueOf(value)
319 first, n := utf8.DecodeRuneInString(key)
320 fieldName := string(unicode.ToUpper(first)) + key[n:]
321 if rv.Kind() == reflect.Struct {
322 v := rv.FieldByName(fieldName)
323 if !v.IsValid() {
324 return nil, nil
325 }
326 return v.Interface(), nil
327 } else if rv.Kind() == reflect.Ptr {
328		// TODO: handle multiple levels of indirection.
329 if rv.IsNil() {
330 return nil, nil
331 }
332 rv = rv.Elem()
333 v := rv.FieldByName(fieldName)
334 if !v.IsValid() {
335 return nil, nil
336 }
337 return v.Interface(), nil
338 }
339 return nil, nil
340}
341
342func (intr *treeInterpreter) flattenWithReflection(value interface{}) (interface{}, error) {
343 v := reflect.ValueOf(value)
344 flattened := []interface{}{}
345 for i := 0; i < v.Len(); i++ {
346 element := v.Index(i).Interface()
347 if reflect.TypeOf(element).Kind() == reflect.Slice {
348 // Then insert the contents of the element
349 // slice into the flattened slice,
350 // i.e flattened = append(flattened, mySlice...)
351 elementV := reflect.ValueOf(element)
352 for j := 0; j < elementV.Len(); j++ {
353 flattened = append(
354 flattened, elementV.Index(j).Interface())
355 }
356 } else {
357 flattened = append(flattened, element)
358 }
359 }
360 return flattened, nil
361}
362
363func (intr *treeInterpreter) sliceWithReflection(node ASTNode, value interface{}) (interface{}, error) {
364 v := reflect.ValueOf(value)
365 parts := node.value.([]*int)
366 sliceParams := make([]sliceParam, 3)
367 for i, part := range parts {
368 if part != nil {
369 sliceParams[i].Specified = true
370 sliceParams[i].N = *part
371 }
372 }
373 final := []interface{}{}
374 for i := 0; i < v.Len(); i++ {
375 element := v.Index(i).Interface()
376 final = append(final, element)
377 }
378 return slice(final, sliceParams)
379}
380
381func (intr *treeInterpreter) filterProjectionWithReflection(node ASTNode, value interface{}) (interface{}, error) {
382 compareNode := node.children[2]
383 collected := []interface{}{}
384 v := reflect.ValueOf(value)
385 for i := 0; i < v.Len(); i++ {
386 element := v.Index(i).Interface()
387 result, err := intr.Execute(compareNode, element)
388 if err != nil {
389 return nil, err
390 }
391 if !isFalse(result) {
392 current, err := intr.Execute(node.children[1], element)
393 if err != nil {
394 return nil, err
395 }
396 if current != nil {
397 collected = append(collected, current)
398 }
399 }
400 }
401 return collected, nil
402}
403
404func (intr *treeInterpreter) projectWithReflection(node ASTNode, value interface{}) (interface{}, error) {
405 collected := []interface{}{}
406 v := reflect.ValueOf(value)
407 for i := 0; i < v.Len(); i++ {
408 element := v.Index(i).Interface()
409 result, err := intr.Execute(node.children[1], element)
410 if err != nil {
411 return nil, err
412 }
413 if result != nil {
414 collected = append(collected, result)
415 }
416 }
417 return collected, nil
418}
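Most callers never touch treeInterpreter directly; the reflection fallbacks above (projectWithReflection and friends) are what let JMESPath expressions run against plain Go structs rather than only unmarshalled JSON. A hedged sketch, again assuming the exported Search helper from this library's api.go:

```go
package main

import (
	"fmt"

	"github.com/jmespath/go-jmespath"
)

// server is a hypothetical user-provided type; []server is not
// []interface{}, so projections take the reflection path above.
type server struct {
	Name string
	Tags []string
}

func main() {
	data := []server{
		{Name: "web-1", Tags: []string{"prod"}},
		{Name: "web-2", Tags: []string{"dev"}},
	}

	// fieldFromStruct upper-cases the first rune of the key, so the
	// JMESPath field "name" resolves to the exported field "Name".
	names, err := jmespath.Search("[*].name", data)
	if err != nil {
		panic(err)
	}
	fmt.Println(names) // [web-1 web-2]
}
```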
diff --git a/vendor/github.com/jmespath/go-jmespath/lexer.go b/vendor/github.com/jmespath/go-jmespath/lexer.go
new file mode 100644
index 0000000..817900c
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/lexer.go
@@ -0,0 +1,420 @@
1package jmespath
2
3import (
4 "bytes"
5 "encoding/json"
6 "fmt"
7 "strconv"
8 "strings"
9 "unicode/utf8"
10)
11
12type token struct {
13 tokenType tokType
14 value string
15 position int
16 length int
17}
18
19type tokType int
20
21const eof = -1
22
23// Lexer contains information about the expression being tokenized.
24type Lexer struct {
25 expression string // The expression provided by the user.
26 currentPos int // The current position in the string.
27	lastWidth  int          // The width of the current rune.
28 buf bytes.Buffer // Internal buffer used for building up values.
29}
30
31// SyntaxError is the main error used whenever a lexing or parsing error occurs.
32type SyntaxError struct {
33 msg string // Error message displayed to user
34 Expression string // Expression that generated a SyntaxError
35 Offset int // The location in the string where the error occurred
36}
37
38func (e SyntaxError) Error() string {
39 // In the future, it would be good to underline the specific
40 // location where the error occurred.
41 return "SyntaxError: " + e.msg
42}
43
44// HighlightLocation will show where the syntax error occurred.
45// It will place a "^" character on a line below the expression
46// at the point where the syntax error occurred.
47func (e SyntaxError) HighlightLocation() string {
48 return e.Expression + "\n" + strings.Repeat(" ", e.Offset) + "^"
49}
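The lexer and parser return SyntaxError by value, so callers can recover the highlighter with a plain type assertion. A sketch, assuming the exported Compile entry point (defined elsewhere in this library) passes the error through unwrapped:

```go
package main

import (
	"fmt"

	"github.com/jmespath/go-jmespath"
)

func main() {
	// "foo.[bar" is missing its closing bracket.
	_, err := jmespath.Compile("foo.[bar")
	if syntaxErr, ok := err.(jmespath.SyntaxError); ok {
		fmt.Println(syntaxErr.Error())
		// The caret is placed under Offset, one line below the expression.
		fmt.Println(syntaxErr.HighlightLocation())
	}
}
```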
50
51//go:generate stringer -type=tokType
52const (
53 tUnknown tokType = iota
54 tStar
55 tDot
56 tFilter
57 tFlatten
58 tLparen
59 tRparen
60 tLbracket
61 tRbracket
62 tLbrace
63 tRbrace
64 tOr
65 tPipe
66 tNumber
67 tUnquotedIdentifier
68 tQuotedIdentifier
69 tComma
70 tColon
71 tLT
72 tLTE
73 tGT
74 tGTE
75 tEQ
76 tNE
77 tJSONLiteral
78 tStringLiteral
79 tCurrent
80 tExpref
81 tAnd
82 tNot
83 tEOF
84)
85
86var basicTokens = map[rune]tokType{
87 '.': tDot,
88 '*': tStar,
89 ',': tComma,
90 ':': tColon,
91 '{': tLbrace,
92 '}': tRbrace,
93 ']': tRbracket, // tLbracket not included because it could be "[]"
94 '(': tLparen,
95 ')': tRparen,
96 '@': tCurrent,
97}
98
99// Bit mask for [a-zA-Z_] shifted down 64 bits to fit in a single uint64.
100// When using this bitmask just be sure to shift the rune down 64 bits
101// before checking against identifierStartBits.
102const identifierStartBits uint64 = 576460745995190270
103
104// Bit mask for [a-zA-Z0-9], 128 bits -> 2 uint64s.
105var identifierTrailingBits = [2]uint64{287948901175001088, 576460745995190270}
106
107var whiteSpace = map[rune]bool{
108 ' ': true, '\t': true, '\n': true, '\r': true,
109}
110
111func (t token) String() string {
112 return fmt.Sprintf("Token{%+v, %s, %d, %d}",
113 t.tokenType, t.value, t.position, t.length)
114}
115
116// NewLexer creates a new JMESPath lexer.
117func NewLexer() *Lexer {
118 lexer := Lexer{}
119 return &lexer
120}
121
122func (lexer *Lexer) next() rune {
123 if lexer.currentPos >= len(lexer.expression) {
124 lexer.lastWidth = 0
125 return eof
126 }
127 r, w := utf8.DecodeRuneInString(lexer.expression[lexer.currentPos:])
128 lexer.lastWidth = w
129 lexer.currentPos += w
130 return r
131}
132
133func (lexer *Lexer) back() {
134 lexer.currentPos -= lexer.lastWidth
135}
136
137func (lexer *Lexer) peek() rune {
138 t := lexer.next()
139 lexer.back()
140 return t
141}
142
143// tokenize takes an expression and returns corresponding tokens.
144func (lexer *Lexer) tokenize(expression string) ([]token, error) {
145 var tokens []token
146 lexer.expression = expression
147 lexer.currentPos = 0
148 lexer.lastWidth = 0
149loop:
150 for {
151 r := lexer.next()
152 if identifierStartBits&(1<<(uint64(r)-64)) > 0 {
153 t := lexer.consumeUnquotedIdentifier()
154 tokens = append(tokens, t)
155 } else if val, ok := basicTokens[r]; ok {
156 // Basic single char token.
157 t := token{
158 tokenType: val,
159 value: string(r),
160 position: lexer.currentPos - lexer.lastWidth,
161 length: 1,
162 }
163 tokens = append(tokens, t)
164 } else if r == '-' || (r >= '0' && r <= '9') {
165 t := lexer.consumeNumber()
166 tokens = append(tokens, t)
167 } else if r == '[' {
168 t := lexer.consumeLBracket()
169 tokens = append(tokens, t)
170 } else if r == '"' {
171 t, err := lexer.consumeQuotedIdentifier()
172 if err != nil {
173 return tokens, err
174 }
175 tokens = append(tokens, t)
176 } else if r == '\'' {
177 t, err := lexer.consumeRawStringLiteral()
178 if err != nil {
179 return tokens, err
180 }
181 tokens = append(tokens, t)
182 } else if r == '`' {
183 t, err := lexer.consumeLiteral()
184 if err != nil {
185 return tokens, err
186 }
187 tokens = append(tokens, t)
188 } else if r == '|' {
189 t := lexer.matchOrElse(r, '|', tOr, tPipe)
190 tokens = append(tokens, t)
191 } else if r == '<' {
192 t := lexer.matchOrElse(r, '=', tLTE, tLT)
193 tokens = append(tokens, t)
194 } else if r == '>' {
195 t := lexer.matchOrElse(r, '=', tGTE, tGT)
196 tokens = append(tokens, t)
197 } else if r == '!' {
198 t := lexer.matchOrElse(r, '=', tNE, tNot)
199 tokens = append(tokens, t)
200 } else if r == '=' {
201 t := lexer.matchOrElse(r, '=', tEQ, tUnknown)
202 tokens = append(tokens, t)
203 } else if r == '&' {
204 t := lexer.matchOrElse(r, '&', tAnd, tExpref)
205 tokens = append(tokens, t)
206 } else if r == eof {
207 break loop
208 } else if _, ok := whiteSpace[r]; ok {
209 // Ignore whitespace
210 } else {
211 return tokens, lexer.syntaxError(fmt.Sprintf("Unknown char: %s", strconv.QuoteRuneToASCII(r)))
212 }
213 }
214 tokens = append(tokens, token{tEOF, "", len(lexer.expression), 0})
215 return tokens, nil
216}
217
218// Consume characters until the ending rune "r" is reached.
219// If the end of the expression is reached before seeing the
220// terminating rune "r", then an error is returned.
221// If no error occurs then the matching substring is returned.
222// The returned string will not include the ending rune.
223func (lexer *Lexer) consumeUntil(end rune) (string, error) {
224 start := lexer.currentPos
225 current := lexer.next()
226 for current != end && current != eof {
227 if current == '\\' && lexer.peek() != eof {
228 lexer.next()
229 }
230 current = lexer.next()
231 }
232 if lexer.lastWidth == 0 {
233 // Then we hit an EOF so we never reached the closing
234 // delimiter.
235 return "", SyntaxError{
236 msg: "Unclosed delimiter: " + string(end),
237 Expression: lexer.expression,
238 Offset: len(lexer.expression),
239 }
240 }
241 return lexer.expression[start : lexer.currentPos-lexer.lastWidth], nil
242}
243
244func (lexer *Lexer) consumeLiteral() (token, error) {
245 start := lexer.currentPos
246 value, err := lexer.consumeUntil('`')
247 if err != nil {
248 return token{}, err
249 }
250 value = strings.Replace(value, "\\`", "`", -1)
251 return token{
252 tokenType: tJSONLiteral,
253 value: value,
254 position: start,
255 length: len(value),
256 }, nil
257}
258
259func (lexer *Lexer) consumeRawStringLiteral() (token, error) {
260 start := lexer.currentPos
261 currentIndex := start
262 current := lexer.next()
263 for current != '\'' && lexer.peek() != eof {
264 if current == '\\' && lexer.peek() == '\'' {
265 chunk := lexer.expression[currentIndex : lexer.currentPos-1]
266 lexer.buf.WriteString(chunk)
267 lexer.buf.WriteString("'")
268 lexer.next()
269 currentIndex = lexer.currentPos
270 }
271 current = lexer.next()
272 }
273 if lexer.lastWidth == 0 {
274 // Then we hit an EOF so we never reached the closing
275 // delimiter.
276 return token{}, SyntaxError{
277 msg: "Unclosed delimiter: '",
278 Expression: lexer.expression,
279 Offset: len(lexer.expression),
280 }
281 }
282 if currentIndex < lexer.currentPos {
283 lexer.buf.WriteString(lexer.expression[currentIndex : lexer.currentPos-1])
284 }
285 value := lexer.buf.String()
286	// Reset the buffer so it can be reused.
287 lexer.buf.Reset()
288 return token{
289 tokenType: tStringLiteral,
290 value: value,
291 position: start,
292 length: len(value),
293 }, nil
294}
295
296func (lexer *Lexer) syntaxError(msg string) SyntaxError {
297 return SyntaxError{
298 msg: msg,
299 Expression: lexer.expression,
300 Offset: lexer.currentPos - 1,
301 }
302}
303
304// Checks for a two char token, otherwise matches a single character
305// token. This is used whenever a two char token overlaps a single
306// char token, e.g. "||" -> tPipe, "|" -> tOr.
307func (lexer *Lexer) matchOrElse(first rune, second rune, matchedType tokType, singleCharType tokType) token {
308 start := lexer.currentPos - lexer.lastWidth
309 nextRune := lexer.next()
310 var t token
311 if nextRune == second {
312 t = token{
313 tokenType: matchedType,
314 value: string(first) + string(second),
315 position: start,
316 length: 2,
317 }
318 } else {
319 lexer.back()
320 t = token{
321 tokenType: singleCharType,
322 value: string(first),
323 position: start,
324 length: 1,
325 }
326 }
327 return t
328}
329
330func (lexer *Lexer) consumeLBracket() token {
331	// There are three options here:
332	// 1. A filter expression "[?"
333	// 2. A flatten operator "[]"
334	// 3. A bare lbracket "["
335 start := lexer.currentPos - lexer.lastWidth
336 nextRune := lexer.next()
337 var t token
338 if nextRune == '?' {
339 t = token{
340 tokenType: tFilter,
341 value: "[?",
342 position: start,
343 length: 2,
344 }
345 } else if nextRune == ']' {
346 t = token{
347 tokenType: tFlatten,
348 value: "[]",
349 position: start,
350 length: 2,
351 }
352 } else {
353 t = token{
354 tokenType: tLbracket,
355 value: "[",
356 position: start,
357 length: 1,
358 }
359 lexer.back()
360 }
361 return t
362}
363
364func (lexer *Lexer) consumeQuotedIdentifier() (token, error) {
365 start := lexer.currentPos
366 value, err := lexer.consumeUntil('"')
367 if err != nil {
368 return token{}, err
369 }
370 var decoded string
371 asJSON := []byte("\"" + value + "\"")
372	if err := json.Unmarshal(asJSON, &decoded); err != nil {
373 return token{}, err
374 }
375 return token{
376 tokenType: tQuotedIdentifier,
377 value: decoded,
378 position: start - 1,
379 length: len(decoded),
380 }, nil
381}
382
383func (lexer *Lexer) consumeUnquotedIdentifier() token {
384 // Consume runes until we reach the end of an unquoted
385 // identifier.
386 start := lexer.currentPos - lexer.lastWidth
387 for {
388 r := lexer.next()
389		if r < 0 || r > 127 || identifierTrailingBits[uint64(r)/64]&(1<<(uint64(r)%64)) == 0 {
390 lexer.back()
391 break
392 }
393 }
394 value := lexer.expression[start:lexer.currentPos]
395 return token{
396 tokenType: tUnquotedIdentifier,
397 value: value,
398 position: start,
399 length: lexer.currentPos - start,
400 }
401}
402
403func (lexer *Lexer) consumeNumber() token {
404 // Consume runes until we reach something that's not a number.
405 start := lexer.currentPos - lexer.lastWidth
406 for {
407 r := lexer.next()
408 if r < '0' || r > '9' {
409 lexer.back()
410 break
411 }
412 }
413 value := lexer.expression[start:lexer.currentPos]
414 return token{
415 tokenType: tNumber,
416 value: value,
417 position: start,
418 length: lexer.currentPos - start,
419 }
420}
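Because tokenize is unexported, it can only be exercised from inside the package. A hypothetical in-package test sketch:

```go
// lexer_sketch_test.go — hypothetical; must live in package jmespath
// because tokenize is unexported.
package jmespath

import "testing"

func TestTokenizeSketch(t *testing.T) {
	lexer := NewLexer()
	tokens, err := lexer.tokenize("foo[0] || bar")
	if err != nil {
		t.Fatal(err)
	}
	// Expected stream: tUnquotedIdentifier, tLbracket, tNumber,
	// tRbracket, tOr, tUnquotedIdentifier, tEOF.
	for _, tok := range tokens {
		t.Logf("%s", tok)
	}
}
```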
diff --git a/vendor/github.com/jmespath/go-jmespath/parser.go b/vendor/github.com/jmespath/go-jmespath/parser.go
new file mode 100644
index 0000000..1240a17
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/parser.go
@@ -0,0 +1,603 @@
1package jmespath
2
3import (
4 "encoding/json"
5 "fmt"
6 "strconv"
7 "strings"
8)
9
10type astNodeType int
11
12//go:generate stringer -type astNodeType
13const (
14 ASTEmpty astNodeType = iota
15 ASTComparator
16 ASTCurrentNode
17 ASTExpRef
18 ASTFunctionExpression
19 ASTField
20 ASTFilterProjection
21 ASTFlatten
22 ASTIdentity
23 ASTIndex
24 ASTIndexExpression
25 ASTKeyValPair
26 ASTLiteral
27 ASTMultiSelectHash
28 ASTMultiSelectList
29 ASTOrExpression
30 ASTAndExpression
31 ASTNotExpression
32 ASTPipe
33 ASTProjection
34 ASTSubexpression
35 ASTSlice
36 ASTValueProjection
37)
38
39// ASTNode represents the abstract syntax tree of a JMESPath expression.
40type ASTNode struct {
41 nodeType astNodeType
42 value interface{}
43 children []ASTNode
44}
45
46func (node ASTNode) String() string {
47 return node.PrettyPrint(0)
48}
49
50// PrettyPrint will pretty print the parsed AST.
51// The AST is an implementation detail and this pretty print
52// function is provided as a convenience method to help with
53// debugging. You should not rely on its output as the internal
54// structure of the AST may change at any time.
55func (node ASTNode) PrettyPrint(indent int) string {
56 spaces := strings.Repeat(" ", indent)
57 output := fmt.Sprintf("%s%s {\n", spaces, node.nodeType)
58 nextIndent := indent + 2
59 if node.value != nil {
60 if converted, ok := node.value.(fmt.Stringer); ok {
61 // Account for things like comparator nodes
62 // that are enums with a String() method.
63 output += fmt.Sprintf("%svalue: %s\n", strings.Repeat(" ", nextIndent), converted.String())
64 } else {
65 output += fmt.Sprintf("%svalue: %#v\n", strings.Repeat(" ", nextIndent), node.value)
66 }
67 }
68 lastIndex := len(node.children)
69 if lastIndex > 0 {
70 output += fmt.Sprintf("%schildren: {\n", strings.Repeat(" ", nextIndent))
71 childIndent := nextIndent + 2
72 for _, elem := range node.children {
73 output += elem.PrettyPrint(childIndent)
74 }
75 }
76 output += fmt.Sprintf("%s}\n", spaces)
77 return output
78}
79
80var bindingPowers = map[tokType]int{
81 tEOF: 0,
82 tUnquotedIdentifier: 0,
83 tQuotedIdentifier: 0,
84 tRbracket: 0,
85 tRparen: 0,
86 tComma: 0,
87 tRbrace: 0,
88 tNumber: 0,
89 tCurrent: 0,
90 tExpref: 0,
91 tColon: 0,
92 tPipe: 1,
93 tOr: 2,
94 tAnd: 3,
95 tEQ: 5,
96 tLT: 5,
97 tLTE: 5,
98 tGT: 5,
99 tGTE: 5,
100 tNE: 5,
101 tFlatten: 9,
102 tStar: 20,
103 tFilter: 21,
104 tDot: 40,
105 tNot: 45,
106 tLbrace: 50,
107 tLbracket: 55,
108 tLparen: 60,
109}
110
111// Parser holds state about the current expression being parsed.
112type Parser struct {
113 expression string
114 tokens []token
115 index int
116}
117
118// NewParser creates a new JMESPath parser.
119func NewParser() *Parser {
120 p := Parser{}
121 return &p
122}
123
124// Parse will compile a JMESPath expression.
125func (p *Parser) Parse(expression string) (ASTNode, error) {
126 lexer := NewLexer()
127 p.expression = expression
128 p.index = 0
129 tokens, err := lexer.tokenize(expression)
130 if err != nil {
131 return ASTNode{}, err
132 }
133 p.tokens = tokens
134 parsed, err := p.parseExpression(0)
135 if err != nil {
136 return ASTNode{}, err
137 }
138 if p.current() != tEOF {
139 return ASTNode{}, p.syntaxError(fmt.Sprintf(
140 "Unexpected token at the end of the expresssion: %s", p.current()))
141 }
142 return parsed, nil
143}
144
145func (p *Parser) parseExpression(bindingPower int) (ASTNode, error) {
146 var err error
147 leftToken := p.lookaheadToken(0)
148 p.advance()
149 leftNode, err := p.nud(leftToken)
150 if err != nil {
151 return ASTNode{}, err
152 }
153 currentToken := p.current()
154 for bindingPower < bindingPowers[currentToken] {
155 p.advance()
156 leftNode, err = p.led(currentToken, leftNode)
157 if err != nil {
158 return ASTNode{}, err
159 }
160 currentToken = p.current()
161 }
162 return leftNode, nil
163}
164
165func (p *Parser) parseIndexExpression() (ASTNode, error) {
166 if p.lookahead(0) == tColon || p.lookahead(1) == tColon {
167 return p.parseSliceExpression()
168 }
169 indexStr := p.lookaheadToken(0).value
170 parsedInt, err := strconv.Atoi(indexStr)
171 if err != nil {
172 return ASTNode{}, err
173 }
174 indexNode := ASTNode{nodeType: ASTIndex, value: parsedInt}
175 p.advance()
176 if err := p.match(tRbracket); err != nil {
177 return ASTNode{}, err
178 }
179 return indexNode, nil
180}
181
182func (p *Parser) parseSliceExpression() (ASTNode, error) {
183 parts := []*int{nil, nil, nil}
184 index := 0
185 current := p.current()
186 for current != tRbracket && index < 3 {
187 if current == tColon {
188 index++
189 p.advance()
190 } else if current == tNumber {
191 parsedInt, err := strconv.Atoi(p.lookaheadToken(0).value)
192 if err != nil {
193 return ASTNode{}, err
194 }
195 parts[index] = &parsedInt
196 p.advance()
197 } else {
198 return ASTNode{}, p.syntaxError(
199 "Expected tColon or tNumber" + ", received: " + p.current().String())
200 }
201 current = p.current()
202 }
203 if err := p.match(tRbracket); err != nil {
204 return ASTNode{}, err
205 }
206 return ASTNode{
207 nodeType: ASTSlice,
208 value: parts,
209 }, nil
210}
211
212func (p *Parser) match(tokenType tokType) error {
213 if p.current() == tokenType {
214 p.advance()
215 return nil
216 }
217 return p.syntaxError("Expected " + tokenType.String() + ", received: " + p.current().String())
218}
219
220func (p *Parser) led(tokenType tokType, node ASTNode) (ASTNode, error) {
221 switch tokenType {
222 case tDot:
223 if p.current() != tStar {
224 right, err := p.parseDotRHS(bindingPowers[tDot])
225 return ASTNode{
226 nodeType: ASTSubexpression,
227 children: []ASTNode{node, right},
228 }, err
229 }
230 p.advance()
231 right, err := p.parseProjectionRHS(bindingPowers[tDot])
232 return ASTNode{
233 nodeType: ASTValueProjection,
234 children: []ASTNode{node, right},
235 }, err
236 case tPipe:
237 right, err := p.parseExpression(bindingPowers[tPipe])
238 return ASTNode{nodeType: ASTPipe, children: []ASTNode{node, right}}, err
239 case tOr:
240 right, err := p.parseExpression(bindingPowers[tOr])
241 return ASTNode{nodeType: ASTOrExpression, children: []ASTNode{node, right}}, err
242 case tAnd:
243 right, err := p.parseExpression(bindingPowers[tAnd])
244 return ASTNode{nodeType: ASTAndExpression, children: []ASTNode{node, right}}, err
245 case tLparen:
246 name := node.value
247 var args []ASTNode
248 for p.current() != tRparen {
249 expression, err := p.parseExpression(0)
250 if err != nil {
251 return ASTNode{}, err
252 }
253 if p.current() == tComma {
254 if err := p.match(tComma); err != nil {
255 return ASTNode{}, err
256 }
257 }
258 args = append(args, expression)
259 }
260 if err := p.match(tRparen); err != nil {
261 return ASTNode{}, err
262 }
263 return ASTNode{
264 nodeType: ASTFunctionExpression,
265 value: name,
266 children: args,
267 }, nil
268 case tFilter:
269 return p.parseFilter(node)
270 case tFlatten:
271 left := ASTNode{nodeType: ASTFlatten, children: []ASTNode{node}}
272 right, err := p.parseProjectionRHS(bindingPowers[tFlatten])
273 return ASTNode{
274 nodeType: ASTProjection,
275 children: []ASTNode{left, right},
276 }, err
277 case tEQ, tNE, tGT, tGTE, tLT, tLTE:
278 right, err := p.parseExpression(bindingPowers[tokenType])
279 if err != nil {
280 return ASTNode{}, err
281 }
282 return ASTNode{
283 nodeType: ASTComparator,
284 value: tokenType,
285 children: []ASTNode{node, right},
286 }, nil
287 case tLbracket:
288 tokenType := p.current()
289 var right ASTNode
290 var err error
291 if tokenType == tNumber || tokenType == tColon {
292 right, err = p.parseIndexExpression()
293 if err != nil {
294 return ASTNode{}, err
295 }
296 return p.projectIfSlice(node, right)
297 }
298 // Otherwise this is a projection.
299 if err := p.match(tStar); err != nil {
300 return ASTNode{}, err
301 }
302 if err := p.match(tRbracket); err != nil {
303 return ASTNode{}, err
304 }
305 right, err = p.parseProjectionRHS(bindingPowers[tStar])
306 if err != nil {
307 return ASTNode{}, err
308 }
309 return ASTNode{
310 nodeType: ASTProjection,
311 children: []ASTNode{node, right},
312 }, nil
313 }
314 return ASTNode{}, p.syntaxError("Unexpected token: " + tokenType.String())
315}
316
317func (p *Parser) nud(token token) (ASTNode, error) {
318 switch token.tokenType {
319 case tJSONLiteral:
320 var parsed interface{}
321 err := json.Unmarshal([]byte(token.value), &parsed)
322 if err != nil {
323 return ASTNode{}, err
324 }
325 return ASTNode{nodeType: ASTLiteral, value: parsed}, nil
326 case tStringLiteral:
327 return ASTNode{nodeType: ASTLiteral, value: token.value}, nil
328 case tUnquotedIdentifier:
329 return ASTNode{
330 nodeType: ASTField,
331 value: token.value,
332 }, nil
333 case tQuotedIdentifier:
334 node := ASTNode{nodeType: ASTField, value: token.value}
335 if p.current() == tLparen {
336 return ASTNode{}, p.syntaxErrorToken("Can't have quoted identifier as function name.", token)
337 }
338 return node, nil
339 case tStar:
340 left := ASTNode{nodeType: ASTIdentity}
341 var right ASTNode
342 var err error
343 if p.current() == tRbracket {
344 right = ASTNode{nodeType: ASTIdentity}
345 } else {
346 right, err = p.parseProjectionRHS(bindingPowers[tStar])
347 }
348 return ASTNode{nodeType: ASTValueProjection, children: []ASTNode{left, right}}, err
349 case tFilter:
350 return p.parseFilter(ASTNode{nodeType: ASTIdentity})
351 case tLbrace:
352 return p.parseMultiSelectHash()
353 case tFlatten:
354 left := ASTNode{
355 nodeType: ASTFlatten,
356 children: []ASTNode{{nodeType: ASTIdentity}},
357 }
358 right, err := p.parseProjectionRHS(bindingPowers[tFlatten])
359 if err != nil {
360 return ASTNode{}, err
361 }
362 return ASTNode{nodeType: ASTProjection, children: []ASTNode{left, right}}, nil
363 case tLbracket:
364 tokenType := p.current()
366 if tokenType == tNumber || tokenType == tColon {
367 right, err := p.parseIndexExpression()
368 if err != nil {
369				return ASTNode{}, err
370 }
371 return p.projectIfSlice(ASTNode{nodeType: ASTIdentity}, right)
372 } else if tokenType == tStar && p.lookahead(1) == tRbracket {
373 p.advance()
374 p.advance()
375 right, err := p.parseProjectionRHS(bindingPowers[tStar])
376 if err != nil {
377 return ASTNode{}, err
378 }
379 return ASTNode{
380 nodeType: ASTProjection,
381 children: []ASTNode{{nodeType: ASTIdentity}, right},
382 }, nil
383 } else {
384 return p.parseMultiSelectList()
385 }
386 case tCurrent:
387 return ASTNode{nodeType: ASTCurrentNode}, nil
388 case tExpref:
389 expression, err := p.parseExpression(bindingPowers[tExpref])
390 if err != nil {
391 return ASTNode{}, err
392 }
393 return ASTNode{nodeType: ASTExpRef, children: []ASTNode{expression}}, nil
394 case tNot:
395 expression, err := p.parseExpression(bindingPowers[tNot])
396 if err != nil {
397 return ASTNode{}, err
398 }
399 return ASTNode{nodeType: ASTNotExpression, children: []ASTNode{expression}}, nil
400 case tLparen:
401 expression, err := p.parseExpression(0)
402 if err != nil {
403 return ASTNode{}, err
404 }
405 if err := p.match(tRparen); err != nil {
406 return ASTNode{}, err
407 }
408 return expression, nil
409 case tEOF:
410 return ASTNode{}, p.syntaxErrorToken("Incomplete expression", token)
411 }
412
413 return ASTNode{}, p.syntaxErrorToken("Invalid token: "+token.tokenType.String(), token)
414}
415
416func (p *Parser) parseMultiSelectList() (ASTNode, error) {
417 var expressions []ASTNode
418 for {
419 expression, err := p.parseExpression(0)
420 if err != nil {
421 return ASTNode{}, err
422 }
423 expressions = append(expressions, expression)
424 if p.current() == tRbracket {
425 break
426 }
427 err = p.match(tComma)
428 if err != nil {
429 return ASTNode{}, err
430 }
431 }
432 err := p.match(tRbracket)
433 if err != nil {
434 return ASTNode{}, err
435 }
436 return ASTNode{
437 nodeType: ASTMultiSelectList,
438 children: expressions,
439 }, nil
440}
441
442func (p *Parser) parseMultiSelectHash() (ASTNode, error) {
443 var children []ASTNode
444 for {
445 keyToken := p.lookaheadToken(0)
446 if err := p.match(tUnquotedIdentifier); err != nil {
447 if err := p.match(tQuotedIdentifier); err != nil {
448 return ASTNode{}, p.syntaxError("Expected tQuotedIdentifier or tUnquotedIdentifier")
449 }
450 }
451 keyName := keyToken.value
452 err := p.match(tColon)
453 if err != nil {
454 return ASTNode{}, err
455 }
456 value, err := p.parseExpression(0)
457 if err != nil {
458 return ASTNode{}, err
459 }
460 node := ASTNode{
461 nodeType: ASTKeyValPair,
462 value: keyName,
463 children: []ASTNode{value},
464 }
465 children = append(children, node)
466 if p.current() == tComma {
467 err := p.match(tComma)
468 if err != nil {
469				return ASTNode{}, err
470 }
471 } else if p.current() == tRbrace {
472 err := p.match(tRbrace)
473 if err != nil {
474				return ASTNode{}, err
475 }
476 break
477 }
478 }
479 return ASTNode{
480 nodeType: ASTMultiSelectHash,
481 children: children,
482 }, nil
483}
484
485func (p *Parser) projectIfSlice(left ASTNode, right ASTNode) (ASTNode, error) {
486 indexExpr := ASTNode{
487 nodeType: ASTIndexExpression,
488 children: []ASTNode{left, right},
489 }
490 if right.nodeType == ASTSlice {
491 right, err := p.parseProjectionRHS(bindingPowers[tStar])
492 return ASTNode{
493 nodeType: ASTProjection,
494 children: []ASTNode{indexExpr, right},
495 }, err
496 }
497 return indexExpr, nil
498}
499func (p *Parser) parseFilter(node ASTNode) (ASTNode, error) {
500 var right, condition ASTNode
501 var err error
502 condition, err = p.parseExpression(0)
503 if err != nil {
504 return ASTNode{}, err
505 }
506 if err := p.match(tRbracket); err != nil {
507 return ASTNode{}, err
508 }
509 if p.current() == tFlatten {
510 right = ASTNode{nodeType: ASTIdentity}
511 } else {
512 right, err = p.parseProjectionRHS(bindingPowers[tFilter])
513 if err != nil {
514 return ASTNode{}, err
515 }
516 }
517
518 return ASTNode{
519 nodeType: ASTFilterProjection,
520 children: []ASTNode{node, right, condition},
521 }, nil
522}
523
524func (p *Parser) parseDotRHS(bindingPower int) (ASTNode, error) {
525 lookahead := p.current()
526 if tokensOneOf([]tokType{tQuotedIdentifier, tUnquotedIdentifier, tStar}, lookahead) {
527 return p.parseExpression(bindingPower)
528 } else if lookahead == tLbracket {
529 if err := p.match(tLbracket); err != nil {
530 return ASTNode{}, err
531 }
532 return p.parseMultiSelectList()
533 } else if lookahead == tLbrace {
534 if err := p.match(tLbrace); err != nil {
535 return ASTNode{}, err
536 }
537 return p.parseMultiSelectHash()
538 }
539 return ASTNode{}, p.syntaxError("Expected identifier, lbracket, or lbrace")
540}
541
542func (p *Parser) parseProjectionRHS(bindingPower int) (ASTNode, error) {
543 current := p.current()
544 if bindingPowers[current] < 10 {
545 return ASTNode{nodeType: ASTIdentity}, nil
546 } else if current == tLbracket {
547 return p.parseExpression(bindingPower)
548 } else if current == tFilter {
549 return p.parseExpression(bindingPower)
550 } else if current == tDot {
551 err := p.match(tDot)
552 if err != nil {
553 return ASTNode{}, err
554 }
555 return p.parseDotRHS(bindingPower)
556 } else {
557		return ASTNode{}, p.syntaxError("Unexpected token after projection: " + current.String())
558 }
559}
560
561func (p *Parser) lookahead(number int) tokType {
562 return p.lookaheadToken(number).tokenType
563}
564
565func (p *Parser) current() tokType {
566 return p.lookahead(0)
567}
568
569func (p *Parser) lookaheadToken(number int) token {
570 return p.tokens[p.index+number]
571}
572
573func (p *Parser) advance() {
574 p.index++
575}
576
577func tokensOneOf(elements []tokType, token tokType) bool {
578 for _, elem := range elements {
579 if elem == token {
580 return true
581 }
582 }
583 return false
584}
585
586func (p *Parser) syntaxError(msg string) SyntaxError {
587 return SyntaxError{
588 msg: msg,
589 Expression: p.expression,
590 Offset: p.lookaheadToken(0).position,
591 }
592}
593
594// Create a SyntaxError based on the provided token.
595// This differs from syntaxError() which creates a SyntaxError
596// based on the current lookahead token.
597func (p *Parser) syntaxErrorToken(msg string, t token) SyntaxError {
598 return SyntaxError{
599 msg: msg,
600 Expression: p.expression,
601 Offset: t.position,
602 }
603}
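A sketch of driving the parser directly; NewParser, Parse, and PrettyPrint are all exported, though PrettyPrint's output is explicitly not a stable interface:

```go
package main

import (
	"fmt"

	"github.com/jmespath/go-jmespath"
)

func main() {
	parser := jmespath.NewParser()
	ast, err := parser.Parse("locations[?state == 'WA'].name | sort(@)")
	if err != nil {
		panic(err)
	}
	// Dump the AST for debugging; the shape may change between versions.
	fmt.Println(ast.PrettyPrint(0))
}
```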
diff --git a/vendor/github.com/jmespath/go-jmespath/toktype_string.go b/vendor/github.com/jmespath/go-jmespath/toktype_string.go
new file mode 100644
index 0000000..dae79cb
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/toktype_string.go
@@ -0,0 +1,16 @@
1// generated by stringer -type=tokType; DO NOT EDIT
2
3package jmespath
4
5import "fmt"
6
7const _tokType_name = "tUnknowntStartDottFiltertFlattentLparentRparentLbrackettRbrackettLbracetRbracetOrtPipetNumbertUnquotedIdentifiertQuotedIdentifiertCommatColontLTtLTEtGTtGTEtEQtNEtJSONLiteraltStringLiteraltCurrenttExpreftAndtNottEOF"
8
9var _tokType_index = [...]uint8{0, 8, 13, 17, 24, 32, 39, 46, 55, 64, 71, 78, 81, 86, 93, 112, 129, 135, 141, 144, 148, 151, 155, 158, 161, 173, 187, 195, 202, 206, 210, 214}
10
11func (i tokType) String() string {
12 if i < 0 || i >= tokType(len(_tokType_index)-1) {
13 return fmt.Sprintf("tokType(%d)", i)
14 }
15 return _tokType_name[_tokType_index[i]:_tokType_index[i+1]]
16}
diff --git a/vendor/github.com/jmespath/go-jmespath/util.go b/vendor/github.com/jmespath/go-jmespath/util.go
new file mode 100644
index 0000000..ddc1b7d
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/util.go
@@ -0,0 +1,185 @@
1package jmespath
2
3import (
4 "errors"
5 "reflect"
6)
7
8// isFalse determines if an object is false based on the JMESPath spec.
9// JMESPath defines false values to be any of:
10// - An empty string, array, or hash.
11// - The boolean value false.
12// - nil
13func isFalse(value interface{}) bool {
14 switch v := value.(type) {
15 case bool:
16 return !v
17 case []interface{}:
18 return len(v) == 0
19 case map[string]interface{}:
20 return len(v) == 0
21 case string:
22 return len(v) == 0
23 case nil:
24 return true
25 }
26 // Try the reflection cases before returning false.
27 rv := reflect.ValueOf(value)
28 switch rv.Kind() {
29 case reflect.Struct:
30 // A struct type will never be false, even if
31 // all of its values are the zero type.
32 return false
33 case reflect.Slice, reflect.Map:
34 return rv.Len() == 0
35 case reflect.Ptr:
36 if rv.IsNil() {
37 return true
38 }
39 // If it's a pointer type, we'll try to deref the pointer
40 // and evaluate the pointer value for isFalse.
41 element := rv.Elem()
42 return isFalse(element.Interface())
43 }
44 return false
45}
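This truthiness rule is what drives the || and && cases in the interpreter above. A quick sketch via the exported Search helper (an assumption; it lives outside this diff):

```go
package main

import (
	"fmt"

	"github.com/jmespath/go-jmespath"
)

func main() {
	data := map[string]interface{}{
		"empty": []interface{}{},
		"name":  "web-1",
	}
	// The empty array is false under isFalse, so || falls through
	// to its right-hand operand.
	v, _ := jmespath.Search("empty || name", data)
	fmt.Println(v) // web-1
}
```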
46
47// objsEqual is a generic object equality check.
48// It will take two arbitrary objects and recursively determine
49// if they are equal.
50func objsEqual(left interface{}, right interface{}) bool {
51 return reflect.DeepEqual(left, right)
52}
53
54// sliceParam refers to a single part of a slice.
55// A slice consists of a start, a stop, and a step, similar to
56// python slices.
57type sliceParam struct {
58 N int
59 Specified bool
60}
61
62// slice supports the [start:stop:step] style slicing that's supported in JMESPath.
63func slice(slice []interface{}, parts []sliceParam) ([]interface{}, error) {
64 computed, err := computeSliceParams(len(slice), parts)
65 if err != nil {
66 return nil, err
67 }
68 start, stop, step := computed[0], computed[1], computed[2]
69 result := []interface{}{}
70 if step > 0 {
71 for i := start; i < stop; i += step {
72 result = append(result, slice[i])
73 }
74 } else {
75 for i := start; i > stop; i += step {
76 result = append(result, slice[i])
77 }
78 }
79 return result, nil
80}
81
82func computeSliceParams(length int, parts []sliceParam) ([]int, error) {
83 var start, stop, step int
84 if !parts[2].Specified {
85 step = 1
86 } else if parts[2].N == 0 {
87 return nil, errors.New("Invalid slice, step cannot be 0")
88 } else {
89 step = parts[2].N
90 }
91	stepValueNegative := step < 0
97
98 if !parts[0].Specified {
99 if stepValueNegative {
100 start = length - 1
101 } else {
102 start = 0
103 }
104 } else {
105 start = capSlice(length, parts[0].N, step)
106 }
107
108 if !parts[1].Specified {
109 if stepValueNegative {
110 stop = -1
111 } else {
112 stop = length
113 }
114 } else {
115 stop = capSlice(length, parts[1].N, step)
116 }
117 return []int{start, stop, step}, nil
118}
119
120func capSlice(length int, actual int, step int) int {
121 if actual < 0 {
122 actual += length
123 if actual < 0 {
124 if step < 0 {
125 actual = -1
126 } else {
127 actual = 0
128 }
129 }
130 } else if actual >= length {
131 if step < 0 {
132 actual = length - 1
133 } else {
134 actual = length
135 }
136 }
137 return actual
138}
139
140// toArrayNum converts an empty interface type to a slice of float64.
141// If any element in the array cannot be converted, then nil is returned
142// along with a second value of false.
143func toArrayNum(data interface{}) ([]float64, bool) {
144 // Is there a better way to do this with reflect?
145 if d, ok := data.([]interface{}); ok {
146 result := make([]float64, len(d))
147 for i, el := range d {
148 item, ok := el.(float64)
149 if !ok {
150 return nil, false
151 }
152 result[i] = item
153 }
154 return result, true
155 }
156 return nil, false
157}
158
159// toArrayStr converts an empty interface type to a slice of strings.
160// If any element in the array cannot be converted, then nil is returned
161// along with a second value of false. If the input data could be entirely
162// converted, then the converted data, along with a second value of true,
163// will be returned.
164func toArrayStr(data interface{}) ([]string, bool) {
165 // Is there a better way to do this with reflect?
166 if d, ok := data.([]interface{}); ok {
167 result := make([]string, len(d))
168 for i, el := range d {
169 item, ok := el.(string)
170 if !ok {
171 return nil, false
172 }
173 result[i] = item
174 }
175 return result, true
176 }
177 return nil, false
178}
179
180func isSliceType(v interface{}) bool {
181 if v == nil {
182 return false
183 }
184 return reflect.TypeOf(v).Kind() == reflect.Slice
185}
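The slice helpers implement Python-style semantics: negative indexes count from the end, and a negative step walks backwards with flipped defaults for start and stop (see computeSliceParams above). A sketch through the public API (JSON-style numbers decode as float64):

```go
package main

import (
	"fmt"

	"github.com/jmespath/go-jmespath"
)

func main() {
	data := map[string]interface{}{
		"xs": []interface{}{0.0, 1.0, 2.0, 3.0, 4.0},
	}
	head, _ := jmespath.Search("xs[:2]", data)  // start/stop default per step sign
	rev, _ := jmespath.Search("xs[::-1]", data) // negative step reverses
	fmt.Println(head) // [0 1]
	fmt.Println(rev)  // [4 3 2 1 0]
}
```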
diff --git a/vendor/github.com/mitchellh/copystructure/LICENSE b/vendor/github.com/mitchellh/copystructure/LICENSE
new file mode 100644
index 0000000..2298515
--- /dev/null
+++ b/vendor/github.com/mitchellh/copystructure/LICENSE
@@ -0,0 +1,21 @@
1The MIT License (MIT)
2
3Copyright (c) 2014 Mitchell Hashimoto
4
5Permission is hereby granted, free of charge, to any person obtaining a copy
6of this software and associated documentation files (the "Software"), to deal
7in the Software without restriction, including without limitation the rights
8to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9copies of the Software, and to permit persons to whom the Software is
10furnished to do so, subject to the following conditions:
11
12The above copyright notice and this permission notice shall be included in
13all copies or substantial portions of the Software.
14
15THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
21THE SOFTWARE.
diff --git a/vendor/github.com/mitchellh/copystructure/README.md b/vendor/github.com/mitchellh/copystructure/README.md
new file mode 100644
index 0000000..bcb8c8d
--- /dev/null
+++ b/vendor/github.com/mitchellh/copystructure/README.md
@@ -0,0 +1,21 @@
1# copystructure
2
3copystructure is a Go library for deep copying values in Go.
4
5This allows you to copy Go values that may contain reference values
6such as maps, slices, or pointers, and copy their data as well instead
7of just their references.
8
9## Installation
10
11Standard `go get`:
12
13```
14$ go get github.com/mitchellh/copystructure
15```
16
17## Usage & Example
18
19For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/copystructure).
20
21The `Copy` function has examples associated with it there.
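A minimal sketch of the core API (the Godoc examples are authoritative):

```go
package main

import (
	"fmt"

	"github.com/mitchellh/copystructure"
)

func main() {
	input := map[string]interface{}{
		"tags": []string{"a", "b"},
	}

	dup, err := copystructure.Copy(input)
	if err != nil {
		panic(err)
	}

	// Mutating the copy leaves the original untouched.
	dup.(map[string]interface{})["tags"].([]string)[0] = "z"
	fmt.Println(input["tags"]) // [a b]
}
```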
diff --git a/vendor/github.com/mitchellh/copystructure/copier_time.go b/vendor/github.com/mitchellh/copystructure/copier_time.go
new file mode 100644
index 0000000..db6a6aa
--- /dev/null
+++ b/vendor/github.com/mitchellh/copystructure/copier_time.go
@@ -0,0 +1,15 @@
1package copystructure
2
3import (
4 "reflect"
5 "time"
6)
7
8func init() {
9 Copiers[reflect.TypeOf(time.Time{})] = timeCopier
10}
11
12func timeCopier(v interface{}) (interface{}, error) {
13 // Just... copy it.
14 return v.(time.Time), nil
15}
diff --git a/vendor/github.com/mitchellh/copystructure/copystructure.go b/vendor/github.com/mitchellh/copystructure/copystructure.go
new file mode 100644
index 0000000..0e725ea
--- /dev/null
+++ b/vendor/github.com/mitchellh/copystructure/copystructure.go
@@ -0,0 +1,477 @@
1package copystructure
2
3import (
4 "errors"
5 "reflect"
6 "sync"
7
8 "github.com/mitchellh/reflectwalk"
9)
10
11// Copy returns a deep copy of v.
12func Copy(v interface{}) (interface{}, error) {
13 return Config{}.Copy(v)
14}
15
16// CopierFunc is a function that knows how to deep copy a specific type.
17// Register these globally with the Copiers variable.
18type CopierFunc func(interface{}) (interface{}, error)
19
20// Copiers is a map of types that behave specially when they are copied.
21// If a type is found in this map while deep copying, this function
22// will be called to copy it instead of attempting to copy all fields.
23//
24// The key should be the type, obtained using: reflect.TypeOf(value with type).
25//
26// It is unsafe to write to this map after Copies have started. If you
27// are writing to this map while also copying, wrap all modifications to
28// this map as well as to Copy in a mutex.
29var Copiers map[reflect.Type]CopierFunc = make(map[reflect.Type]CopierFunc)
30
31// Must is a helper that wraps a call to a function returning
32// (interface{}, error) and panics if the error is non-nil. It is intended
33// for use in variable initializations and should only be used when a copy
34// error should be a crashing case.
35func Must(v interface{}, err error) interface{} {
36 if err != nil {
37 panic("copy error: " + err.Error())
38 }
39
40 return v
41}
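Registering a CopierFunc mirrors the timeCopier hook in copier_time.go above; per the Copiers doc comment, it is the escape hatch for types the field-by-field walk cannot handle, such as structs with unexported fields. A hedged sketch with a hypothetical type, assuming the walker's Struct hook (later in this file) consults Copiers as documented:

```go
package main

import (
	"fmt"
	"reflect"

	"github.com/mitchellh/copystructure"
)

// secret has an unexported field, which the reflective walk cannot
// read, so it needs a custom copier.
type secret struct{ b []byte }

func init() {
	copystructure.Copiers[reflect.TypeOf(secret{})] = func(v interface{}) (interface{}, error) {
		s := v.(secret)
		b := make([]byte, len(s.b))
		copy(b, s.b)
		return secret{b: b}, nil
	}
}

func main() {
	dup := copystructure.Must(copystructure.Copy(secret{b: []byte("k")}))
	fmt.Printf("%q\n", dup.(secret).b) // "k"
}
```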
42
43var errPointerRequired = errors.New("Copy argument must be a pointer when Lock is true")
44
45type Config struct {
46 // Lock any types that are a sync.Locker and are not a mutex while copying.
47 // If there is an RLocker method, use that to get the sync.Locker.
48 Lock bool
49
50 // Copiers is a map of types associated with a CopierFunc. Use the global
51 // Copiers map if this is nil.
52 Copiers map[reflect.Type]CopierFunc
53}
54
55func (c Config) Copy(v interface{}) (interface{}, error) {
56 if c.Lock && reflect.ValueOf(v).Kind() != reflect.Ptr {
57 return nil, errPointerRequired
58 }
59
60 w := new(walker)
61 if c.Lock {
62 w.useLocks = true
63 }
64
65 if c.Copiers == nil {
66 c.Copiers = Copiers
67 }
68
69 err := reflectwalk.Walk(v, w)
70 if err != nil {
71 return nil, err
72 }
73
74 // Get the result. If the result is nil, then we want to turn it
75 // into a typed nil if we can.
76 result := w.Result
77 if result == nil {
78 val := reflect.ValueOf(v)
79 result = reflect.Indirect(reflect.New(val.Type())).Interface()
80 }
81
82 return result, nil
83}
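With Lock set, Copy insists on a pointer (errPointerRequired above) so the walker can take any sync.Locker it encounters before reading the fields it guards. A sketch, assuming the result preserves the pointer level of the input:

```go
package main

import (
	"fmt"
	"sync"

	"github.com/mitchellh/copystructure"
)

type counters struct {
	sync.Mutex
	N int
}

func main() {
	c := &counters{N: 42}
	// Lock: true requires a pointer argument; the embedded mutex is
	// held while the walker copies the struct.
	dup, err := copystructure.Config{Lock: true}.Copy(c)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", dup) // a distinct *counters with N:42
}
```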
84
85// Return the key used to index interface types we've seen. Store the number
86// of pointers in the upper 32 bits, and the depth in the lower 32 bits. This is
87// easy to calculate, easy to match a key with our current depth, and we don't
88// need to deal with initializing and cleaning up nested maps or slices.
89func ifaceKey(pointers, depth int) uint64 {
90 return uint64(pointers)<<32 | uint64(depth)
91}
92
93type walker struct {
94 Result interface{}
95
96 depth int
97 ignoreDepth int
98 vals []reflect.Value
99 cs []reflect.Value
100
101 // This stores the number of pointers we've walked over, indexed by depth.
102 ps []int
103
104 // If an interface is indirected by a pointer, we need to know the type of
105 // interface to create when creating the new value. Store the interface
106 // types here, indexed by both the walk depth and the number of pointers
107 // already seen at that depth. Use ifaceKey to calculate the proper uint64
108 // value.
109 ifaceTypes map[uint64]reflect.Type
110
111 // any locks we've taken, indexed by depth
112 locks []sync.Locker
113 // take locks while walking the structure
114 useLocks bool
115}
116
117func (w *walker) Enter(l reflectwalk.Location) error {
118 w.depth++
119
120 // ensure we have enough elements to index via w.depth
121 for w.depth >= len(w.locks) {
122 w.locks = append(w.locks, nil)
123 }
124
125 for len(w.ps) < w.depth+1 {
126 w.ps = append(w.ps, 0)
127 }
128
129 return nil
130}
131
132func (w *walker) Exit(l reflectwalk.Location) error {
133 locker := w.locks[w.depth]
134 w.locks[w.depth] = nil
135 if locker != nil {
136 defer locker.Unlock()
137 }
138
139 // clear out pointers and interfaces as we exit the stack
140 w.ps[w.depth] = 0
141
142 for k := range w.ifaceTypes {
143 mask := uint64(^uint32(0))
144 if k&mask == uint64(w.depth) {
145 delete(w.ifaceTypes, k)
146 }
147 }
148
149 w.depth--
150 if w.ignoreDepth > w.depth {
151 w.ignoreDepth = 0
152 }
153
154 if w.ignoring() {
155 return nil
156 }
157
158 switch l {
159 case reflectwalk.Map:
160 fallthrough
161 case reflectwalk.Slice:
162 // Pop the map or slice off our container stack
163 w.cs = w.cs[:len(w.cs)-1]
164 case reflectwalk.MapValue:
165 // Pop off the key and value
166 mv := w.valPop()
167 mk := w.valPop()
168 m := w.cs[len(w.cs)-1]
169
170 // If mv is the zero value, SetMapIndex deletes the key from the map,
171 // or in this case never adds it. We need to create a properly typed
172 // zero value so that this key can be set.
173 if !mv.IsValid() {
174 mv = reflect.Zero(m.Type().Elem())
175 }
176 m.SetMapIndex(mk, mv)
177 case reflectwalk.SliceElem:
178 // Pop off the value and the index and set it on the slice
179 v := w.valPop()
180 i := w.valPop().Interface().(int)
181 if v.IsValid() {
182 s := w.cs[len(w.cs)-1]
183 se := s.Index(i)
184 if se.CanSet() {
185 se.Set(v)
186 }
187 }
188 case reflectwalk.Struct:
189 w.replacePointerMaybe()
190
191 // Remove the struct from the container stack
192 w.cs = w.cs[:len(w.cs)-1]
193 case reflectwalk.StructField:
194 // Pop off the value and the field
195 v := w.valPop()
196 f := w.valPop().Interface().(reflect.StructField)
197 if v.IsValid() {
198 s := w.cs[len(w.cs)-1]
199 sf := reflect.Indirect(s).FieldByName(f.Name)
200
201 if sf.CanSet() {
202 sf.Set(v)
203 }
204 }
205 case reflectwalk.WalkLoc:
206 // Clear out the slices for GC
207 w.cs = nil
208 w.vals = nil
209 }
210
211 return nil
212}
213
214func (w *walker) Map(m reflect.Value) error {
215 if w.ignoring() {
216 return nil
217 }
218 w.lock(m)
219
220 // Create the map. If the map itself is nil, then just make a nil map
221 var newMap reflect.Value
222 if m.IsNil() {
223 newMap = reflect.Indirect(reflect.New(m.Type()))
224 } else {
225 newMap = reflect.MakeMap(m.Type())
226 }
227
228 w.cs = append(w.cs, newMap)
229 w.valPush(newMap)
230 return nil
231}
232
233func (w *walker) MapElem(m, k, v reflect.Value) error {
234 return nil
235}
236
237func (w *walker) PointerEnter(v bool) error {
238 if v {
239 w.ps[w.depth]++
240 }
241 return nil
242}
243
244func (w *walker) PointerExit(v bool) error {
245 if v {
246 w.ps[w.depth]--
247 }
248 return nil
249}
250
251func (w *walker) Interface(v reflect.Value) error {
252 if !v.IsValid() {
253 return nil
254 }
255 if w.ifaceTypes == nil {
256 w.ifaceTypes = make(map[uint64]reflect.Type)
257 }
258
259 w.ifaceTypes[ifaceKey(w.ps[w.depth], w.depth)] = v.Type()
260 return nil
261}
262
263func (w *walker) Primitive(v reflect.Value) error {
264 if w.ignoring() {
265 return nil
266 }
267 w.lock(v)
268
269 // IsValid verifies that v is not the zero Value and CanInterface verifies
270 // that we're allowed to read this value (it isn't an unexported field).
271 var newV reflect.Value
272 if v.IsValid() && v.CanInterface() {
273 newV = reflect.New(v.Type())
274 newV.Elem().Set(v)
275 }
276
277 w.valPush(newV)
278 w.replacePointerMaybe()
279 return nil
280}
281
282func (w *walker) Slice(s reflect.Value) error {
283 if w.ignoring() {
284 return nil
285 }
286 w.lock(s)
287
288 var newS reflect.Value
289 if s.IsNil() {
290 newS = reflect.Indirect(reflect.New(s.Type()))
291 } else {
292 newS = reflect.MakeSlice(s.Type(), s.Len(), s.Cap())
293 }
294
295 w.cs = append(w.cs, newS)
296 w.valPush(newS)
297 return nil
298}
299
300func (w *walker) SliceElem(i int, elem reflect.Value) error {
301 if w.ignoring() {
302 return nil
303 }
304
305 // We don't write the slice here because elem might still be
306 // arbitrarily complex. Just record the index and continue on.
307 w.valPush(reflect.ValueOf(i))
308
309 return nil
310}
311
312func (w *walker) Struct(s reflect.Value) error {
313 if w.ignoring() {
314 return nil
315 }
316 w.lock(s)
317
318 var v reflect.Value
319 if c, ok := Copiers[s.Type()]; ok {
320 // We have a Copier for this struct, so we use that copier to
321 // get the copy, and we ignore anything deeper than this.
322 w.ignoreDepth = w.depth
323
324 dup, err := c(s.Interface())
325 if err != nil {
326 return err
327 }
328
329 v = reflect.ValueOf(dup)
330 } else {
331 // No copier, we copy ourselves and allow reflectwalk to guide
332 // us deeper into the structure for copying.
333 v = reflect.New(s.Type())
334 }
335
336 // Push the value onto the value stack for setting the struct field,
337 // and add the struct itself to the containers stack in case we walk
338 // deeper so that its own fields can be modified.
339 w.valPush(v)
340 w.cs = append(w.cs, v)
341
342 return nil
343}
344
345func (w *walker) StructField(f reflect.StructField, v reflect.Value) error {
346 if w.ignoring() {
347 return nil
348 }
349
350 // If PkgPath is non-empty, this is a private (unexported) field.
351 // We skip unexported fields since the Go runtime doesn't allow us to set them.
352 if f.PkgPath != "" {
353 return reflectwalk.SkipEntry
354 }
355
356 // Push the field onto the stack, we'll handle it when we exit
357 // the struct field in Exit...
358 w.valPush(reflect.ValueOf(f))
359 return nil
360}
361
362// ignore causes the walker to ignore any more values until we exit the current depth
363func (w *walker) ignore() {
364 w.ignoreDepth = w.depth
365}
366
367func (w *walker) ignoring() bool {
368 return w.ignoreDepth > 0 && w.depth >= w.ignoreDepth
369}
370
371func (w *walker) pointerPeek() bool {
372 return w.ps[w.depth] > 0
373}
374
375func (w *walker) valPop() reflect.Value {
376 result := w.vals[len(w.vals)-1]
377 w.vals = w.vals[:len(w.vals)-1]
378
379 // If we're out of values, that means we popped everything off. In
380 // this case, we reset the result so the next pushed value becomes
381 // the result.
382 if len(w.vals) == 0 {
383 w.Result = nil
384 }
385
386 return result
387}
388
389func (w *walker) valPush(v reflect.Value) {
390 w.vals = append(w.vals, v)
391
392 // If we haven't set the result yet, then this is the result since
393 // it is the first (outermost) value we're seeing.
394 if w.Result == nil && v.IsValid() {
395 w.Result = v.Interface()
396 }
397}
398
399func (w *walker) replacePointerMaybe() {
400 // If no pointer was walked at this depth, just dereference the value on
401 // top of the stack and push it back.
402 if !w.pointerPeek() {
403 w.valPush(reflect.Indirect(w.valPop()))
404 return
405 }
406
407 v := w.valPop()
408 for i := 1; i < w.ps[w.depth]; i++ {
409 if iType, ok := w.ifaceTypes[ifaceKey(w.ps[w.depth]-i, w.depth)]; ok {
410 iface := reflect.New(iType).Elem()
411 iface.Set(v)
412 v = iface
413 }
414
415 p := reflect.New(v.Type())
416 p.Elem().Set(v)
417 v = p
418 }
419
420 w.valPush(v)
421}
422
423// if this value is a Locker, lock it and add it to the locks slice
424func (w *walker) lock(v reflect.Value) {
425 if !w.useLocks {
426 return
427 }
428
429 if !v.IsValid() || !v.CanInterface() {
430 return
431 }
432
433 type rlocker interface {
434 RLocker() sync.Locker
435 }
436
437 var locker sync.Locker
438
439 // We can't call Interface() on the value directly, since that makes a
440 // copy and locking a copy is useless. Taking the address is fine: a
441 // pointer to a value which is a sync.Locker is also a sync.Locker.
442 if v.Kind() == reflect.Ptr {
443 switch l := v.Interface().(type) {
444 case rlocker:
445 // don't lock a mutex directly
446 if _, ok := l.(*sync.RWMutex); !ok {
447 locker = l.RLocker()
448 }
449 case sync.Locker:
450 locker = l
451 }
452 } else if v.CanAddr() {
453 switch l := v.Addr().Interface().(type) {
454 case rlocker:
455 // don't lock a mutex directly
456 if _, ok := l.(*sync.RWMutex); !ok {
457 locker = l.RLocker()
458 }
459 case sync.Locker:
460 locker = l
461 }
462 }
463
464 // still no callable locker
465 if locker == nil {
466 return
467 }
468
469 // don't lock a mutex directly
470 switch locker.(type) {
471 case *sync.Mutex, *sync.RWMutex:
472 return
473 }
474
475 locker.Lock()
476 w.locks[w.depth] = locker
477}
diff --git a/vendor/github.com/mitchellh/go-homedir/LICENSE b/vendor/github.com/mitchellh/go-homedir/LICENSE
new file mode 100644
index 0000000..f9c841a
--- /dev/null
+++ b/vendor/github.com/mitchellh/go-homedir/LICENSE
@@ -0,0 +1,21 @@
1The MIT License (MIT)
2
3Copyright (c) 2013 Mitchell Hashimoto
4
5Permission is hereby granted, free of charge, to any person obtaining a copy
6of this software and associated documentation files (the "Software"), to deal
7in the Software without restriction, including without limitation the rights
8to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9copies of the Software, and to permit persons to whom the Software is
10furnished to do so, subject to the following conditions:
11
12The above copyright notice and this permission notice shall be included in
13all copies or substantial portions of the Software.
14
15THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
21THE SOFTWARE.
diff --git a/vendor/github.com/mitchellh/go-homedir/README.md b/vendor/github.com/mitchellh/go-homedir/README.md
new file mode 100644
index 0000000..d70706d
--- /dev/null
+++ b/vendor/github.com/mitchellh/go-homedir/README.md
@@ -0,0 +1,14 @@
1# go-homedir
2
3This is a Go library for detecting the user's home directory without
4the use of cgo, so the library can be used in cross-compilation environments.
5
6Usage is simple: call `homedir.Dir()` to get the home directory
7for the current user, and `homedir.Expand()` to expand a leading `~` in a path
8to the home directory.
9
10**Why not just use `os/user`?** The built-in `os/user` package requires
11cgo on Darwin systems. This means that any Go code that uses that package
12cannot be cross-compiled. But 99% of the time, `os/user` is used just to
13retrieve the home directory, which we can do for the current user without
14cgo. This library does that, enabling cross-compilation.
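
A hedged usage sketch (the example paths in the comments are hypothetical):

```go
package main

import (
	"fmt"

	homedir "github.com/mitchellh/go-homedir"
)

func main() {
	// Dir returns the executing user's home directory,
	// e.g. "/home/alice" on Linux.
	home, err := homedir.Dir()
	if err != nil {
		panic(err)
	}
	fmt.Println(home)

	// Expand replaces a leading "~" with the home directory, so
	// "~/.config/app" becomes "/home/alice/.config/app".
	path, err := homedir.Expand("~/.config/app")
	if err != nil {
		panic(err)
	}
	fmt.Println(path)
}
```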
diff --git a/vendor/github.com/mitchellh/go-homedir/homedir.go b/vendor/github.com/mitchellh/go-homedir/homedir.go
new file mode 100644
index 0000000..47e1f9e
--- /dev/null
+++ b/vendor/github.com/mitchellh/go-homedir/homedir.go
@@ -0,0 +1,137 @@
1package homedir
2
3import (
4 "bytes"
5 "errors"
6 "os"
7 "os/exec"
8 "path/filepath"
9 "runtime"
10 "strconv"
11 "strings"
12 "sync"
13)
14
15// DisableCache will disable caching of the home directory. Caching is enabled
16// by default.
17var DisableCache bool
18
19var homedirCache string
20var cacheLock sync.RWMutex
21
22// Dir returns the home directory for the executing user.
23//
24// This uses an OS-specific method for discovering the home directory.
25// An error is returned if a home directory cannot be detected.
26func Dir() (string, error) {
27 if !DisableCache {
28 cacheLock.RLock()
29 cached := homedirCache
30 cacheLock.RUnlock()
31 if cached != "" {
32 return cached, nil
33 }
34 }
35
36 cacheLock.Lock()
37 defer cacheLock.Unlock()
38
39 var result string
40 var err error
41 if runtime.GOOS == "windows" {
42 result, err = dirWindows()
43 } else {
44 // Unix-like system, so just assume Unix
45 result, err = dirUnix()
46 }
47
48 if err != nil {
49 return "", err
50 }
51 homedirCache = result
52 return result, nil
53}
54
55// Expand expands the path to include the home directory if the path
56// is prefixed with `~`. If it isn't prefixed with `~`, the path is
57// returned as-is.
58func Expand(path string) (string, error) {
59 if len(path) == 0 {
60 return path, nil
61 }
62
63 if path[0] != '~' {
64 return path, nil
65 }
66
67 if len(path) > 1 && path[1] != '/' && path[1] != '\\' {
68 return "", errors.New("cannot expand user-specific home dir")
69 }
70
71 dir, err := Dir()
72 if err != nil {
73 return "", err
74 }
75
76 return filepath.Join(dir, path[1:]), nil
77}
78
79func dirUnix() (string, error) {
80 // First prefer the HOME environment variable
81 if home := os.Getenv("HOME"); home != "" {
82 return home, nil
83 }
84
85 // If that fails, try getent
86 var stdout bytes.Buffer
87 cmd := exec.Command("getent", "passwd", strconv.Itoa(os.Getuid()))
88 cmd.Stdout = &stdout
89 if err := cmd.Run(); err != nil {
90 // If the error is ErrNotFound, we ignore it. Otherwise, return it.
91 if err != exec.ErrNotFound {
92 return "", err
93 }
94 } else {
95 if passwd := strings.TrimSpace(stdout.String()); passwd != "" {
96 // username:password:uid:gid:gecos:home:shell
97 passwdParts := strings.SplitN(passwd, ":", 7)
98 if len(passwdParts) > 5 {
99 return passwdParts[5], nil
100 }
101 }
102 }
103
104 // If all else fails, try the shell
105 stdout.Reset()
106 cmd = exec.Command("sh", "-c", "cd && pwd")
107 cmd.Stdout = &stdout
108 if err := cmd.Run(); err != nil {
109 return "", err
110 }
111
112 result := strings.TrimSpace(stdout.String())
113 if result == "" {
114 return "", errors.New("blank output when reading home directory")
115 }
116
117 return result, nil
118}
119
120func dirWindows() (string, error) {
121 // First prefer the HOME environment variable
122 if home := os.Getenv("HOME"); home != "" {
123 return home, nil
124 }
125
126 drive := os.Getenv("HOMEDRIVE")
127 path := os.Getenv("HOMEPATH")
128 home := drive + path
129 if drive == "" || path == "" {
130 home = os.Getenv("USERPROFILE")
131 }
132 if home == "" {
133 return "", errors.New("HOMEDRIVE, HOMEPATH, and USERPROFILE are blank")
134 }
135
136 return home, nil
137}
diff --git a/vendor/github.com/mitchellh/hashstructure/LICENSE b/vendor/github.com/mitchellh/hashstructure/LICENSE
new file mode 100644
index 0000000..a3866a2
--- /dev/null
+++ b/vendor/github.com/mitchellh/hashstructure/LICENSE
@@ -0,0 +1,21 @@
1The MIT License (MIT)
2
3Copyright (c) 2016 Mitchell Hashimoto
4
5Permission is hereby granted, free of charge, to any person obtaining a copy
6of this software and associated documentation files (the "Software"), to deal
7in the Software without restriction, including without limitation the rights
8to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9copies of the Software, and to permit persons to whom the Software is
10furnished to do so, subject to the following conditions:
11
12The above copyright notice and this permission notice shall be included in
13all copies or substantial portions of the Software.
14
15THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
21THE SOFTWARE.
diff --git a/vendor/github.com/mitchellh/hashstructure/README.md b/vendor/github.com/mitchellh/hashstructure/README.md
new file mode 100644
index 0000000..7d0de5b
--- /dev/null
+++ b/vendor/github.com/mitchellh/hashstructure/README.md
@@ -0,0 +1,61 @@
1# hashstructure
2
3hashstructure is a Go library for creating a deterministic hash value
4for arbitrary values in Go.
5
6This can be used to key complex values in a hash (for use in a map, set,
7etc.). The most common use cases are comparing two values without sending
8the data across the network, caching values locally (de-duplication), and so on.
9
10## Features
11
12 * Hash any arbitrary Go value, including complex types.
13
14 * Tag a struct field to ignore it and not affect the hash value.
15
16 * Tag a slice-typed struct field to treat it as a set, where ordering
17 doesn't affect the hash code but the field itself is still taken into
18 account to create the hash value.
19
20 * Optionally specify a custom hash function to optimize for speed, collision
21 avoidance for your data set, etc.
22
23## Installation
24
25Standard `go get`:
26
27```
28$ go get github.com/mitchellh/hashstructure
29```
30
31## Usage & Example
32
33For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/hashstructure).
34
35A quick code example is shown below:
36
37
38 type ComplexStruct struct {
39 Name string
40 Age uint
41 Metadata map[string]interface{}
42 }
43
44 v := ComplexStruct{
45 Name: "mitchellh",
46 Age: 64,
47 Metadata: map[string]interface{}{
48 "car": true,
49 "location": "California",
50 "siblings": []string{"Bob", "John"},
51 },
52 }
53
54 hash, err := hashstructure.Hash(v, nil)
55 if err != nil {
56 panic(err)
57 }
58
59 fmt.Printf("%d", hash)
60 // Output:
61 // 2307517237273902113
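
The `hash:"set"` tag mentioned under Features can be sketched the same way; `Team` here is a hypothetical type:

```go
package main

import (
	"fmt"

	"github.com/mitchellh/hashstructure"
)

type Team struct {
	Name string
	// "set": member ordering doesn't affect the hash value.
	Members []string `hash:"set"`
}

func main() {
	a := Team{Name: "core", Members: []string{"ann", "bob"}}
	b := Team{Name: "core", Members: []string{"bob", "ann"}}

	ha, _ := hashstructure.Hash(a, nil)
	hb, _ := hashstructure.Hash(b, nil)
	fmt.Println(ha == hb) // true: set elements are hashed order-independently
}
```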
diff --git a/vendor/github.com/mitchellh/hashstructure/hashstructure.go b/vendor/github.com/mitchellh/hashstructure/hashstructure.go
new file mode 100644
index 0000000..6f586fa
--- /dev/null
+++ b/vendor/github.com/mitchellh/hashstructure/hashstructure.go
@@ -0,0 +1,323 @@
1package hashstructure
2
3import (
4 "encoding/binary"
5 "fmt"
6 "hash"
7 "hash/fnv"
8 "reflect"
9)
10
11// HashOptions are options that are available for hashing.
12type HashOptions struct {
13 // Hasher is the hash function to use. If this isn't set, it will
14 // default to FNV.
15 Hasher hash.Hash64
16
17 // TagName is the struct tag to look at when hashing the structure.
18 // By default this is "hash".
19 TagName string
20}
21
22// Hash returns the hash value of an arbitrary value.
23//
24// If opts is nil, then default options will be used. See HashOptions
25// for the default values.
26//
27// Notes on the value:
28//
29// * Unexported fields on structs are ignored and do not affect the
30// hash value.
31//
32// * Adding an exported field to a struct with the zero value will change
33// the hash value.
34//
35// For structs, the hashing can be controlled using tags. For example:
36//
37// struct {
38// Name string
39// UUID string `hash:"ignore"`
40// }
41//
42// The available tag values are:
43//
44// * "ignore" - The field will be ignored and not affect the hash code.
45//
46// * "set" - The field will be treated as a set, where ordering doesn't
47// affect the hash code. This only works for slices.
48//
49func Hash(v interface{}, opts *HashOptions) (uint64, error) {
50 // Create default options
51 if opts == nil {
52 opts = &HashOptions{}
53 }
54 if opts.Hasher == nil {
55 opts.Hasher = fnv.New64()
56 }
57 if opts.TagName == "" {
58 opts.TagName = "hash"
59 }
60
61 // Reset the hash
62 opts.Hasher.Reset()
63
64 // Create our walker and walk the structure
65 w := &walker{
66 h: opts.Hasher,
67 tag: opts.TagName,
68 }
69 return w.visit(reflect.ValueOf(v), nil)
70}
71
72type walker struct {
73 h hash.Hash64
74 tag string
75}
76
77type visitOpts struct {
78 // Flags are a bitmask of flags to affect behavior of this visit
79 Flags visitFlag
80
81 // Information about the struct containing this field
82 Struct interface{}
83 StructField string
84}
85
86func (w *walker) visit(v reflect.Value, opts *visitOpts) (uint64, error) {
87 // Loop since these can be wrapped in multiple layers of pointers
88 // and interfaces.
89 for {
90 // If we have an interface, dereference it. We have to do this up
91 // here because it might be a nil in there and the check below must
92 // catch that.
93 if v.Kind() == reflect.Interface {
94 v = v.Elem()
95 continue
96 }
97
98 if v.Kind() == reflect.Ptr {
99 v = reflect.Indirect(v)
100 continue
101 }
102
103 break
104 }
105
106 // If it is nil, treat it like a zero.
107 if !v.IsValid() {
108 var tmp int8
109 v = reflect.ValueOf(tmp)
110 }
111
112 // binary.Write can't handle raw (unsized) ints, so we convert them to
113 // sized ints, choosing the largest (64-bit) variants.
114 switch v.Kind() {
115 case reflect.Int:
116 v = reflect.ValueOf(int64(v.Int()))
117 case reflect.Uint:
118 v = reflect.ValueOf(uint64(v.Uint()))
119 case reflect.Bool:
120 var tmp int8
121 if v.Bool() {
122 tmp = 1
123 }
124 v = reflect.ValueOf(tmp)
125 }
126
127 k := v.Kind()
128
129 // We can shortcut numeric values by directly binary writing them
130 if k >= reflect.Int && k <= reflect.Complex64 {
131 // A direct hash calculation
132 w.h.Reset()
133 err := binary.Write(w.h, binary.LittleEndian, v.Interface())
134 return w.h.Sum64(), err
135 }
136
137 switch k {
138 case reflect.Array:
139 var h uint64
140 l := v.Len()
141 for i := 0; i < l; i++ {
142 current, err := w.visit(v.Index(i), nil)
143 if err != nil {
144 return 0, err
145 }
146
147 h = hashUpdateOrdered(w.h, h, current)
148 }
149
150 return h, nil
151
152 case reflect.Map:
153 var includeMap IncludableMap
154 if opts != nil && opts.Struct != nil {
155 if v, ok := opts.Struct.(IncludableMap); ok {
156 includeMap = v
157 }
158 }
159
160 // Build the hash for the map. We do this by XOR-ing all the key
161 // and value hashes, which makes the result deterministic regardless of map iteration order.
162 var h uint64
163 for _, k := range v.MapKeys() {
164 v := v.MapIndex(k)
165 if includeMap != nil {
166 incl, err := includeMap.HashIncludeMap(
167 opts.StructField, k.Interface(), v.Interface())
168 if err != nil {
169 return 0, err
170 }
171 if !incl {
172 continue
173 }
174 }
175
176 kh, err := w.visit(k, nil)
177 if err != nil {
178 return 0, err
179 }
180 vh, err := w.visit(v, nil)
181 if err != nil {
182 return 0, err
183 }
184
185 fieldHash := hashUpdateOrdered(w.h, kh, vh)
186 h = hashUpdateUnordered(h, fieldHash)
187 }
188
189 return h, nil
190
191 case reflect.Struct:
192 var include Includable
193 parent := v.Interface()
194 if impl, ok := parent.(Includable); ok {
195 include = impl
196 }
197
198 t := v.Type()
199 h, err := w.visit(reflect.ValueOf(t.Name()), nil)
200 if err != nil {
201 return 0, err
202 }
203
204 l := v.NumField()
205 for i := 0; i < l; i++ {
206 if v := v.Field(i); v.CanSet() || t.Field(i).Name != "_" {
207 var f visitFlag
208 fieldType := t.Field(i)
209 if fieldType.PkgPath != "" {
210 // Unexported
211 continue
212 }
213
214 tag := fieldType.Tag.Get(w.tag)
215 if tag == "ignore" {
216 // Ignore this field
217 continue
218 }
219
220 // Check if we implement includable and check it
221 if include != nil {
222 incl, err := include.HashInclude(fieldType.Name, v)
223 if err != nil {
224 return 0, err
225 }
226 if !incl {
227 continue
228 }
229 }
230
231 switch tag {
232 case "set":
233 f |= visitFlagSet
234 }
235
236 kh, err := w.visit(reflect.ValueOf(fieldType.Name), nil)
237 if err != nil {
238 return 0, err
239 }
240
241 vh, err := w.visit(v, &visitOpts{
242 Flags: f,
243 Struct: parent,
244 StructField: fieldType.Name,
245 })
246 if err != nil {
247 return 0, err
248 }
249
250 fieldHash := hashUpdateOrdered(w.h, kh, vh)
251 h = hashUpdateUnordered(h, fieldHash)
252 }
253 }
254
255 return h, nil
256
257 case reflect.Slice:
258 // We have two behaviors here. If it isn't a set, then we just
259 // visit all the elements. If it is a set, then we do a deterministic
260 // hash code.
261 var h uint64
262 var set bool
263 if opts != nil {
264 set = (opts.Flags & visitFlagSet) != 0
265 }
266 l := v.Len()
267 for i := 0; i < l; i++ {
268 current, err := w.visit(v.Index(i), nil)
269 if err != nil {
270 return 0, err
271 }
272
273 if set {
274 h = hashUpdateUnordered(h, current)
275 } else {
276 h = hashUpdateOrdered(w.h, h, current)
277 }
278 }
279
280 return h, nil
281
282 case reflect.String:
283 // Directly hash
284 w.h.Reset()
285 _, err := w.h.Write([]byte(v.String()))
286 return w.h.Sum64(), err
287
288 default:
289 return 0, fmt.Errorf("unknown kind to hash: %s", k)
290 }
291
292 return 0, nil
293}
294
295func hashUpdateOrdered(h hash.Hash64, a, b uint64) uint64 {
296 // For ordered updates, use a real hash function
297 h.Reset()
298
299 // We just panic if the binary writes fail because we are writing
300 // fixed-size uint64 values, which should never fail.
301 e1 := binary.Write(h, binary.LittleEndian, a)
302 e2 := binary.Write(h, binary.LittleEndian, b)
303 if e1 != nil {
304 panic(e1)
305 }
306 if e2 != nil {
307 panic(e2)
308 }
309
310 return h.Sum64()
311}
312
313func hashUpdateUnordered(a, b uint64) uint64 {
314 return a ^ b
315}
316
317// visitFlag is used as a bitmask for affecting visit behavior
318type visitFlag uint
319
320const (
321 visitFlagInvalid visitFlag = iota
322 visitFlagSet = iota << 1
323)
diff --git a/vendor/github.com/mitchellh/hashstructure/include.go b/vendor/github.com/mitchellh/hashstructure/include.go
new file mode 100644
index 0000000..b6289c0
--- /dev/null
+++ b/vendor/github.com/mitchellh/hashstructure/include.go
@@ -0,0 +1,15 @@
1package hashstructure
2
3// Includable is an interface that can optionally be implemented by
4// a struct. It will be called for each field in the struct to check whether
5// it should be included in the hash.
6type Includable interface {
7 HashInclude(field string, v interface{}) (bool, error)
8}
9
10// IncludableMap is an interface that can optionally be implemented by
11// a struct. It will be called when a map-type field is found to ask the
12// struct if the map item should be included in the hash.
13type IncludableMap interface {
14 HashIncludeMap(field string, k, v interface{}) (bool, error)
15}
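
A sketch of implementing Includable to keep a volatile field out of the hash; `Session` is a hypothetical type:

```go
package main

import (
	"fmt"

	"github.com/mitchellh/hashstructure"
)

type Session struct {
	User  string
	Token string // rotates frequently; shouldn't affect the hash
}

// HashInclude makes Session implement Includable: returning false for
// "Token" excludes that field from the hash entirely.
func (s Session) HashInclude(field string, v interface{}) (bool, error) {
	return field != "Token", nil
}

func main() {
	a := Session{User: "ann", Token: "x1"}
	b := Session{User: "ann", Token: "y2"}

	ha, _ := hashstructure.Hash(a, nil)
	hb, _ := hashstructure.Hash(b, nil)
	fmt.Println(ha == hb) // true: only User contributes to the hash
}
```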
diff --git a/vendor/github.com/mitchellh/mapstructure/LICENSE b/vendor/github.com/mitchellh/mapstructure/LICENSE
new file mode 100644
index 0000000..f9c841a
--- /dev/null
+++ b/vendor/github.com/mitchellh/mapstructure/LICENSE
@@ -0,0 +1,21 @@
1The MIT License (MIT)
2
3Copyright (c) 2013 Mitchell Hashimoto
4
5Permission is hereby granted, free of charge, to any person obtaining a copy
6of this software and associated documentation files (the "Software"), to deal
7in the Software without restriction, including without limitation the rights
8to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9copies of the Software, and to permit persons to whom the Software is
10furnished to do so, subject to the following conditions:
11
12The above copyright notice and this permission notice shall be included in
13all copies or substantial portions of the Software.
14
15THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
21THE SOFTWARE.
diff --git a/vendor/github.com/mitchellh/mapstructure/README.md b/vendor/github.com/mitchellh/mapstructure/README.md
new file mode 100644
index 0000000..659d688
--- /dev/null
+++ b/vendor/github.com/mitchellh/mapstructure/README.md
@@ -0,0 +1,46 @@
1# mapstructure
2
3mapstructure is a Go library for decoding generic map values to structures
4and vice versa, while providing helpful error handling.
5
6This library is most useful when decoding values from some data stream (JSON,
7Gob, etc.) where you don't _quite_ know the structure of the underlying data
8until you read a part of it. You can therefore read a `map[string]interface{}`
9and use this library to decode it into the proper underlying native Go
10structure.
11
12## Installation
13
14Standard `go get`:
15
16```
17$ go get github.com/mitchellh/mapstructure
18```
19
20## Usage & Example
21
22For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/mapstructure).
23
24The `Decode` function has examples associated with it there.
25
26## But Why?!
27
28Go offers fantastic standard libraries for decoding formats such as JSON.
29The standard method is to have a struct pre-created, and populate that struct
30from the bytes of the encoded format. This is great, but the problem arises when
31you have configuration or an encoding that changes slightly depending on
32specific fields. For example, consider this JSON:
33
34```json
35{
36 "type": "person",
37 "name": "Mitchell"
38}
39```
40
41Perhaps we can't populate a specific structure without first reading
42the "type" field from the JSON. We could always do two passes over the
43decoding of the JSON (reading the "type" first, and the rest later).
44However, it is much simpler to just decode this into a `map[string]interface{}`
45structure, read the "type" key, then use something like this library
46to decode it into the proper structure.
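
A runnable sketch of that two-phase flow, assuming a hypothetical `Person` struct for the `"type": "person"` case:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/mitchellh/mapstructure"
)

type Person struct {
	Name string
}

func main() {
	raw := []byte(`{"type": "person", "name": "Mitchell"}`)

	// Pass one: decode into a generic map just to read "type".
	var m map[string]interface{}
	if err := json.Unmarshal(raw, &m); err != nil {
		panic(err)
	}

	// Pass two: pick the concrete struct and let mapstructure decode the
	// rest. Field matching is case-insensitive, so "name" maps to Name.
	if m["type"] == "person" {
		var p Person
		if err := mapstructure.Decode(m, &p); err != nil {
			panic(err)
		}
		fmt.Println(p.Name) // Mitchell
	}
}
```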
diff --git a/vendor/github.com/mitchellh/mapstructure/decode_hooks.go b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go
new file mode 100644
index 0000000..115ae67
--- /dev/null
+++ b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go
@@ -0,0 +1,154 @@
1package mapstructure
2
3import (
4 "errors"
5 "reflect"
6 "strconv"
7 "strings"
8 "time"
9)
10
11// typedDecodeHook takes a raw DecodeHookFunc (an interface{}) and turns
12// it into the proper DecodeHookFunc type, such as DecodeHookFuncType.
13func typedDecodeHook(h DecodeHookFunc) DecodeHookFunc {
14 // Create variables here so we can reference them with the reflect pkg
15 var f1 DecodeHookFuncType
16 var f2 DecodeHookFuncKind
17
18 // Fill in the variables into this interface and the rest is done
19 // automatically using the reflect package.
20 potential := []interface{}{f1, f2}
21
22 v := reflect.ValueOf(h)
23 vt := v.Type()
24 for _, raw := range potential {
25 pt := reflect.ValueOf(raw).Type()
26 if vt.ConvertibleTo(pt) {
27 return v.Convert(pt).Interface()
28 }
29 }
30
31 return nil
32}
33
34// DecodeHookExec executes the given decode hook. It should be used to run
35// hooks since it transparently supports the older backwards-compatible
36// DecodeHookFunc variant that takes reflect.Kind instead of reflect.Type.
37func DecodeHookExec(
38 raw DecodeHookFunc,
39 from reflect.Type, to reflect.Type,
40 data interface{}) (interface{}, error) {
41 // Build our arguments that reflect expects
42 argVals := make([]reflect.Value, 3)
43 argVals[0] = reflect.ValueOf(from)
44 argVals[1] = reflect.ValueOf(to)
45 argVals[2] = reflect.ValueOf(data)
46
47 switch f := typedDecodeHook(raw).(type) {
48 case DecodeHookFuncType:
49 return f(from, to, data)
50 case DecodeHookFuncKind:
51 return f(from.Kind(), to.Kind(), data)
52 default:
53 return nil, errors.New("invalid decode hook signature")
54 }
55}
56
57// ComposeDecodeHookFunc creates a single DecodeHookFunc that
58// automatically composes multiple DecodeHookFuncs.
59//
60// The composed funcs are called in order, with the result of the
61// previous transformation.
62func ComposeDecodeHookFunc(fs ...DecodeHookFunc) DecodeHookFunc {
63 return func(
64 f reflect.Type,
65 t reflect.Type,
66 data interface{}) (interface{}, error) {
67 var err error
68 for _, f1 := range fs {
69 data, err = DecodeHookExec(f1, f, t, data)
70 if err != nil {
71 return nil, err
72 }
73
74 // Modify the from kind to be correct with the new data
75 f = nil
76 if val := reflect.ValueOf(data); val.IsValid() {
77 f = val.Type()
78 }
79 }
80
81 return data, nil
82 }
83}
84
85// StringToSliceHookFunc returns a DecodeHookFunc that converts
86// string to []string by splitting on the given sep.
87func StringToSliceHookFunc(sep string) DecodeHookFunc {
88 return func(
89 f reflect.Kind,
90 t reflect.Kind,
91 data interface{}) (interface{}, error) {
92 if f != reflect.String || t != reflect.Slice {
93 return data, nil
94 }
95
96 raw := data.(string)
97 if raw == "" {
98 return []string{}, nil
99 }
100
101 return strings.Split(raw, sep), nil
102 }
103}
104
105// StringToTimeDurationHookFunc returns a DecodeHookFunc that converts
106// strings to time.Duration.
107func StringToTimeDurationHookFunc() DecodeHookFunc {
108 return func(
109 f reflect.Type,
110 t reflect.Type,
111 data interface{}) (interface{}, error) {
112 if f.Kind() != reflect.String {
113 return data, nil
114 }
115 if t != reflect.TypeOf(time.Duration(5)) {
116 return data, nil
117 }
118
119 // Convert it by parsing
120 return time.ParseDuration(data.(string))
121 }
122}
123
// WeaklyTypedHook is a DecodeHookFunc that adds "weak" type conversions,
// e.g. bools and numbers to strings and []uint8 to string.
124func WeaklyTypedHook(
125 f reflect.Kind,
126 t reflect.Kind,
127 data interface{}) (interface{}, error) {
128 dataVal := reflect.ValueOf(data)
129 switch t {
130 case reflect.String:
131 switch f {
132 case reflect.Bool:
133 if dataVal.Bool() {
134 return "1", nil
135 } else {
136 return "0", nil
137 }
138 case reflect.Float32:
139 return strconv.FormatFloat(dataVal.Float(), 'f', -1, 64), nil
140 case reflect.Int:
141 return strconv.FormatInt(dataVal.Int(), 10), nil
142 case reflect.Slice:
143 dataType := dataVal.Type()
144 elemKind := dataType.Elem().Kind()
145 if elemKind == reflect.Uint8 {
146 return string(dataVal.Interface().([]uint8)), nil
147 }
148 case reflect.Uint:
149 return strconv.FormatUint(dataVal.Uint(), 10), nil
150 }
151 }
152
153 return data, nil
154}
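
A sketch wiring the hook helpers above into a decoder; DecoderConfig and NewDecoder are defined in mapstructure.go later in this diff, and `Config` here is a hypothetical target type:

```go
package main

import (
	"fmt"
	"time"

	"github.com/mitchellh/mapstructure"
)

type Config struct {
	Timeout time.Duration
	Tags    []string
}

func main() {
	input := map[string]interface{}{
		"timeout": "30s",
		"tags":    "a,b,c",
	}

	var out Config
	decoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
		// Composed hooks run in order, each seeing the previous result.
		DecodeHook: mapstructure.ComposeDecodeHookFunc(
			mapstructure.StringToTimeDurationHookFunc(),
			mapstructure.StringToSliceHookFunc(","),
		),
		Result: &out,
	})
	if err != nil {
		panic(err)
	}
	if err := decoder.Decode(input); err != nil {
		panic(err)
	}
	fmt.Println(out.Timeout, out.Tags) // 30s [a b c]
}
```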
diff --git a/vendor/github.com/mitchellh/mapstructure/error.go b/vendor/github.com/mitchellh/mapstructure/error.go
new file mode 100644
index 0000000..47a99e5
--- /dev/null
+++ b/vendor/github.com/mitchellh/mapstructure/error.go
@@ -0,0 +1,50 @@
1package mapstructure
2
3import (
4 "errors"
5 "fmt"
6 "sort"
7 "strings"
8)
9
10// Error implements the error interface and can represent multiple
11// errors that occur in the course of a single decode.
12type Error struct {
13 Errors []string
14}
15
16func (e *Error) Error() string {
17 points := make([]string, len(e.Errors))
18 for i, err := range e.Errors {
19 points[i] = fmt.Sprintf("* %s", err)
20 }
21
22 sort.Strings(points)
23 return fmt.Sprintf(
24 "%d error(s) decoding:\n\n%s",
25 len(e.Errors), strings.Join(points, "\n"))
26}
27
28// WrappedErrors implements the errwrap.Wrapper interface to make this
29// return value more useful with the errwrap and go-multierror libraries.
30func (e *Error) WrappedErrors() []error {
31 if e == nil {
32 return nil
33 }
34
35 result := make([]error, len(e.Errors))
36 for i, e := range e.Errors {
37 result[i] = errors.New(e)
38 }
39
40 return result
41}
42
43func appendErrors(errors []string, err error) []string {
44 switch e := err.(type) {
45 case *Error:
46 return append(errors, e.Errors...)
47 default:
48 return append(errors, e.Error())
49 }
50}
diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure.go b/vendor/github.com/mitchellh/mapstructure/mapstructure.go
new file mode 100644
index 0000000..6dee0ef
--- /dev/null
+++ b/vendor/github.com/mitchellh/mapstructure/mapstructure.go
@@ -0,0 +1,823 @@
1// Package mapstructure exposes functionality to convert an
2// arbitrary map[string]interface{} into a native Go structure.
3//
4// The Go structure can be arbitrarily complex, containing slices,
5// other structs, etc. and the decoder will properly decode nested
6// maps and so on into the proper structures in the native Go struct.
7// See the examples to see what the decoder is capable of.
8package mapstructure
9
10import (
11 "encoding/json"
12 "errors"
13 "fmt"
14 "reflect"
15 "sort"
16 "strconv"
17 "strings"
18)
19
20// DecodeHookFunc is the callback function that can be used for
21// data transformations. See "DecodeHook" in the DecoderConfig
22// struct.
23//
24// The type should be DecodeHookFuncType or DecodeHookFuncKind.
25// Either is accepted. Types are a superset of Kinds (Types can return
26// Kinds) and are generally a richer thing to use, but Kinds are simpler
27// if you only need those.
28//
29// The reason DecodeHookFunc is multi-typed is for backwards compatibility:
30// we started with Kinds and then realized Types were the better solution,
31// but have a promise to not break backwards compat so we now support
32// both.
33type DecodeHookFunc interface{}
34
35type DecodeHookFuncType func(reflect.Type, reflect.Type, interface{}) (interface{}, error)
36type DecodeHookFuncKind func(reflect.Kind, reflect.Kind, interface{}) (interface{}, error)
37
38// DecoderConfig is the configuration that is used to create a new decoder
39// and allows customization of various aspects of decoding.
40type DecoderConfig struct {
41 // DecodeHook, if set, will be called before any decoding and any
42 // type conversion (if WeaklyTypedInput is on). This lets you modify
43 // the values before they're set down onto the resulting struct.
44 //
45 // If an error is returned, the entire decode will fail with that
46 // error.
47 DecodeHook DecodeHookFunc
48
49 // If ErrorUnused is true, then it is an error for there to exist
50 // keys in the original map that were unused in the decoding process
51 // (extra keys).
52 ErrorUnused bool
53
54 // ZeroFields, if set to true, will zero fields before writing them.
55 // For example, a map will be emptied before decoded values are put in
56 // it. If this is false, a map will be merged.
57 ZeroFields bool
58
59 // If WeaklyTypedInput is true, the decoder will make the following
60 // "weak" conversions:
61 //
62 // - bools to string (true = "1", false = "0")
63 // - numbers to string (base 10)
64 // - bools to int/uint (true = 1, false = 0)
65 // - strings to int/uint (base implied by prefix)
66 // - int to bool (true if value != 0)
67 // - string to bool (accepts: 1, t, T, TRUE, true, True, 0, f, F,
68 // FALSE, false, False. Anything else is an error)
69 // - empty array = empty map and vice versa
70 // - negative numbers to overflowed uint values (base 10)
71 // - slice of maps to a merged map
72 // - single values are converted to slices if required. Each
73 // element is weakly decoded. For example: "4" can become []int{4}
74 // if the target type is an int slice.
75 //
76 WeaklyTypedInput bool
77
78 // Metadata is the struct that will contain extra metadata about
79 // the decoding. If this is nil, then no metadata will be tracked.
80 Metadata *Metadata
81
82 // Result is a pointer to the struct that will contain the decoded
83 // value.
84 Result interface{}
85
86 // The tag name that mapstructure reads for field names. This
87 // defaults to "mapstructure"
88 TagName string
89}
90
91// A Decoder takes a raw interface value and turns it into structured
92// data, keeping track of rich error information along the way in case
93// anything goes wrong. Unlike the basic top-level Decode method, you can
94// more finely control how the Decoder behaves using the DecoderConfig
95// structure. The top-level Decode method is just a convenience that sets
96// up the most basic Decoder.
97type Decoder struct {
98 config *DecoderConfig
99}
100
101// Metadata contains information about decoding a structure that
102// is tedious or difficult to get otherwise.
103type Metadata struct {
104 // Keys are the keys of the structure which were successfully decoded
105 Keys []string
106
107 // Unused is a slice of keys that were found in the raw value but
108 // weren't decoded since there was no matching field in the result interface
109 Unused []string
110}
111
112// Decode takes a map and uses reflection to convert it into the
113// given Go native structure. rawVal must be a pointer to a struct.
114func Decode(m interface{}, rawVal interface{}) error {
115 config := &DecoderConfig{
116 Metadata: nil,
117 Result: rawVal,
118 }
119
120 decoder, err := NewDecoder(config)
121 if err != nil {
122 return err
123 }
124
125 return decoder.Decode(m)
126}
127
128// WeakDecode is the same as Decode but is shorthand to enable
129// WeaklyTypedInput. See DecoderConfig for more info.
130func WeakDecode(input, output interface{}) error {
131 config := &DecoderConfig{
132 Metadata: nil,
133 Result: output,
134 WeaklyTypedInput: true,
135 }
136
137 decoder, err := NewDecoder(config)
138 if err != nil {
139 return err
140 }
141
142 return decoder.Decode(input)
143}
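
A sketch of WeakDecode exercising a few of the weak conversions listed on DecoderConfig; `Flags` is a hypothetical type:

```go
package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

type Flags struct {
	Count   int
	Verbose bool
	Names   []string
}

func main() {
	// Each value below has the "wrong" static type; WeaklyTypedInput
	// converts them per the rules documented on DecoderConfig.
	input := map[string]interface{}{
		"count":   "42",   // string -> int
		"verbose": 1,      // int -> bool
		"names":   "solo", // single value -> one-element slice
	}

	var f Flags
	if err := mapstructure.WeakDecode(input, &f); err != nil {
		panic(err)
	}
	fmt.Println(f.Count, f.Verbose, f.Names) // 42 true [solo]
}
```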
144
145// NewDecoder returns a new decoder for the given configuration. Once
146// a decoder has been returned, the same configuration must not be used
147// again.
148func NewDecoder(config *DecoderConfig) (*Decoder, error) {
149 val := reflect.ValueOf(config.Result)
150 if val.Kind() != reflect.Ptr {
151 return nil, errors.New("result must be a pointer")
152 }
153
154 val = val.Elem()
155 if !val.CanAddr() {
156 return nil, errors.New("result must be addressable (a pointer)")
157 }
158
159 if config.Metadata != nil {
160 if config.Metadata.Keys == nil {
161 config.Metadata.Keys = make([]string, 0)
162 }
163
164 if config.Metadata.Unused == nil {
165 config.Metadata.Unused = make([]string, 0)
166 }
167 }
168
169 if config.TagName == "" {
170 config.TagName = "mapstructure"
171 }
172
173 result := &Decoder{
174 config: config,
175 }
176
177 return result, nil
178}
179
180// Decode decodes the given raw interface to the target pointer specified
181// by the configuration.
182func (d *Decoder) Decode(raw interface{}) error {
183 return d.decode("", raw, reflect.ValueOf(d.config.Result).Elem())
184}
185
186// Decodes an unknown data type into a specific reflection value.
187func (d *Decoder) decode(name string, data interface{}, val reflect.Value) error {
188 if data == nil {
189 // If the data is nil, then we don't set anything.
190 return nil
191 }
192
193 dataVal := reflect.ValueOf(data)
194 if !dataVal.IsValid() {
195 // If the data value is invalid, then we just set the value
196 // to be the zero value.
197 val.Set(reflect.Zero(val.Type()))
198 return nil
199 }
200
201 if d.config.DecodeHook != nil {
202 // We have a DecodeHook, so let's pre-process the data.
203 var err error
204 data, err = DecodeHookExec(
205 d.config.DecodeHook,
206 dataVal.Type(), val.Type(), data)
207 if err != nil {
208 return fmt.Errorf("error decoding '%s': %s", name, err)
209 }
210 }
211
212 var err error
213 dataKind := getKind(val)
214 switch dataKind {
215 case reflect.Bool:
216 err = d.decodeBool(name, data, val)
217 case reflect.Interface:
218 err = d.decodeBasic(name, data, val)
219 case reflect.String:
220 err = d.decodeString(name, data, val)
221 case reflect.Int:
222 err = d.decodeInt(name, data, val)
223 case reflect.Uint:
224 err = d.decodeUint(name, data, val)
225 case reflect.Float32:
226 err = d.decodeFloat(name, data, val)
227 case reflect.Struct:
228 err = d.decodeStruct(name, data, val)
229 case reflect.Map:
230 err = d.decodeMap(name, data, val)
231 case reflect.Ptr:
232 err = d.decodePtr(name, data, val)
233 case reflect.Slice:
234 err = d.decodeSlice(name, data, val)
235 case reflect.Func:
236 err = d.decodeFunc(name, data, val)
237 default:
238 // If we reached this point then we weren't able to decode it
239 return fmt.Errorf("%s: unsupported type: %s", name, dataKind)
240 }
241
242 // If we reached here, then we successfully decoded SOMETHING, so
243 // mark the key as used if we're tracking metadata.
244 if d.config.Metadata != nil && name != "" {
245 d.config.Metadata.Keys = append(d.config.Metadata.Keys, name)
246 }
247
248 return err
249}
250
251// This decodes a basic type (bool, int, string, etc.) and sets the
252// value directly to "data".
253func (d *Decoder) decodeBasic(name string, data interface{}, val reflect.Value) error {
254 dataVal := reflect.ValueOf(data)
255 if !dataVal.IsValid() {
256 dataVal = reflect.Zero(val.Type())
257 }
258
259 dataValType := dataVal.Type()
260 if !dataValType.AssignableTo(val.Type()) {
261 return fmt.Errorf(
262 "'%s' expected type '%s', got '%s'",
263 name, val.Type(), dataValType)
264 }
265
266 val.Set(dataVal)
267 return nil
268}
269
270func (d *Decoder) decodeString(name string, data interface{}, val reflect.Value) error {
271 dataVal := reflect.ValueOf(data)
272 dataKind := getKind(dataVal)
273
274 converted := true
275 switch {
276 case dataKind == reflect.String:
277 val.SetString(dataVal.String())
278 case dataKind == reflect.Bool && d.config.WeaklyTypedInput:
279 if dataVal.Bool() {
280 val.SetString("1")
281 } else {
282 val.SetString("0")
283 }
284 case dataKind == reflect.Int && d.config.WeaklyTypedInput:
285 val.SetString(strconv.FormatInt(dataVal.Int(), 10))
286 case dataKind == reflect.Uint && d.config.WeaklyTypedInput:
287 val.SetString(strconv.FormatUint(dataVal.Uint(), 10))
288 case dataKind == reflect.Float32 && d.config.WeaklyTypedInput:
289 val.SetString(strconv.FormatFloat(dataVal.Float(), 'f', -1, 64))
290 case dataKind == reflect.Slice && d.config.WeaklyTypedInput:
291 dataType := dataVal.Type()
292 elemKind := dataType.Elem().Kind()
293 switch {
294 case elemKind == reflect.Uint8:
295 val.SetString(string(dataVal.Interface().([]uint8)))
296 default:
297 converted = false
298 }
299 default:
300 converted = false
301 }
302
303 if !converted {
304 return fmt.Errorf(
305 "'%s' expected type '%s', got unconvertible type '%s'",
306 name, val.Type(), dataVal.Type())
307 }
308
309 return nil
310}
311
312func (d *Decoder) decodeInt(name string, data interface{}, val reflect.Value) error {
313 dataVal := reflect.ValueOf(data)
314 dataKind := getKind(dataVal)
315 dataType := dataVal.Type()
316
317 switch {
318 case dataKind == reflect.Int:
319 val.SetInt(dataVal.Int())
320 case dataKind == reflect.Uint:
321 val.SetInt(int64(dataVal.Uint()))
322 case dataKind == reflect.Float32:
323 val.SetInt(int64(dataVal.Float()))
324 case dataKind == reflect.Bool && d.config.WeaklyTypedInput:
325 if dataVal.Bool() {
326 val.SetInt(1)
327 } else {
328 val.SetInt(0)
329 }
330 case dataKind == reflect.String && d.config.WeaklyTypedInput:
331 i, err := strconv.ParseInt(dataVal.String(), 0, val.Type().Bits())
332 if err == nil {
333 val.SetInt(i)
334 } else {
335 return fmt.Errorf("cannot parse '%s' as int: %s", name, err)
336 }
337 case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number":
338 jn := data.(json.Number)
339 i, err := jn.Int64()
340 if err != nil {
341 return fmt.Errorf(
342 "error decoding json.Number into %s: %s", name, err)
343 }
344 val.SetInt(i)
345 default:
346 return fmt.Errorf(
347 "'%s' expected type '%s', got unconvertible type '%s'",
348 name, val.Type(), dataVal.Type())
349 }
350
351 return nil
352}
353
354func (d *Decoder) decodeUint(name string, data interface{}, val reflect.Value) error {
355 dataVal := reflect.ValueOf(data)
356 dataKind := getKind(dataVal)
357
358 switch {
359 case dataKind == reflect.Int:
360 i := dataVal.Int()
361 if i < 0 && !d.config.WeaklyTypedInput {
362 return fmt.Errorf("cannot parse '%s', %d overflows uint",
363 name, i)
364 }
365 val.SetUint(uint64(i))
366 case dataKind == reflect.Uint:
367 val.SetUint(dataVal.Uint())
368 case dataKind == reflect.Float32:
369 f := dataVal.Float()
370 if f < 0 && !d.config.WeaklyTypedInput {
371 return fmt.Errorf("cannot parse '%s', %f overflows uint",
372 name, f)
373 }
374 val.SetUint(uint64(f))
375 case dataKind == reflect.Bool && d.config.WeaklyTypedInput:
376 if dataVal.Bool() {
377 val.SetUint(1)
378 } else {
379 val.SetUint(0)
380 }
381 case dataKind == reflect.String && d.config.WeaklyTypedInput:
382 i, err := strconv.ParseUint(dataVal.String(), 0, val.Type().Bits())
383 if err == nil {
384 val.SetUint(i)
385 } else {
386 return fmt.Errorf("cannot parse '%s' as uint: %s", name, err)
387 }
388 default:
389 return fmt.Errorf(
390 "'%s' expected type '%s', got unconvertible type '%s'",
391 name, val.Type(), dataVal.Type())
392 }
393
394 return nil
395}
396
397func (d *Decoder) decodeBool(name string, data interface{}, val reflect.Value) error {
398 dataVal := reflect.ValueOf(data)
399 dataKind := getKind(dataVal)
400
401 switch {
402 case dataKind == reflect.Bool:
403 val.SetBool(dataVal.Bool())
404 case dataKind == reflect.Int && d.config.WeaklyTypedInput:
405 val.SetBool(dataVal.Int() != 0)
406 case dataKind == reflect.Uint && d.config.WeaklyTypedInput:
407 val.SetBool(dataVal.Uint() != 0)
408 case dataKind == reflect.Float32 && d.config.WeaklyTypedInput:
409 val.SetBool(dataVal.Float() != 0)
410 case dataKind == reflect.String && d.config.WeaklyTypedInput:
411 b, err := strconv.ParseBool(dataVal.String())
412 if err == nil {
413 val.SetBool(b)
414 } else if dataVal.String() == "" {
415 val.SetBool(false)
416 } else {
417 return fmt.Errorf("cannot parse '%s' as bool: %s", name, err)
418 }
419 default:
420 return fmt.Errorf(
421 "'%s' expected type '%s', got unconvertible type '%s'",
422 name, val.Type(), dataVal.Type())
423 }
424
425 return nil
426}
427
428func (d *Decoder) decodeFloat(name string, data interface{}, val reflect.Value) error {
429 dataVal := reflect.ValueOf(data)
430 dataKind := getKind(dataVal)
431 dataType := dataVal.Type()
432
433 switch {
434 case dataKind == reflect.Int:
435 val.SetFloat(float64(dataVal.Int()))
436 case dataKind == reflect.Uint:
437 val.SetFloat(float64(dataVal.Uint()))
438 case dataKind == reflect.Float32:
439 val.SetFloat(float64(dataVal.Float()))
440 case dataKind == reflect.Bool && d.config.WeaklyTypedInput:
441 if dataVal.Bool() {
442 val.SetFloat(1)
443 } else {
444 val.SetFloat(0)
445 }
446 case dataKind == reflect.String && d.config.WeaklyTypedInput:
447 f, err := strconv.ParseFloat(dataVal.String(), val.Type().Bits())
448 if err == nil {
449 val.SetFloat(f)
450 } else {
451 return fmt.Errorf("cannot parse '%s' as float: %s", name, err)
452 }
453 case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number":
454 jn := data.(json.Number)
455 i, err := jn.Float64()
456 if err != nil {
457 return fmt.Errorf(
458 "error decoding json.Number into %s: %s", name, err)
459 }
460 val.SetFloat(i)
461 default:
462 return fmt.Errorf(
463 "'%s' expected type '%s', got unconvertible type '%s'",
464 name, val.Type(), dataVal.Type())
465 }
466
467 return nil
468}
469
470func (d *Decoder) decodeMap(name string, data interface{}, val reflect.Value) error {
471 valType := val.Type()
472 valKeyType := valType.Key()
473 valElemType := valType.Elem()
474
475 // By default we overwrite keys in the current map
476 valMap := val
477
478 // If the map is nil or we're purposely zeroing fields, make a new map
479 if valMap.IsNil() || d.config.ZeroFields {
480 // Make a new map to hold our result
481 mapType := reflect.MapOf(valKeyType, valElemType)
482 valMap = reflect.MakeMap(mapType)
483 }
484
485 // Check input type
486 dataVal := reflect.Indirect(reflect.ValueOf(data))
487 if dataVal.Kind() != reflect.Map {
488 // In weak mode, we accept a slice of maps as an input...
489 if d.config.WeaklyTypedInput {
490 switch dataVal.Kind() {
491 case reflect.Array, reflect.Slice:
492 // Special case for BC reasons (covered by tests)
493 if dataVal.Len() == 0 {
494 val.Set(valMap)
495 return nil
496 }
497
498 for i := 0; i < dataVal.Len(); i++ {
499 err := d.decode(
500 fmt.Sprintf("%s[%d]", name, i),
501 dataVal.Index(i).Interface(), val)
502 if err != nil {
503 return err
504 }
505 }
506
507 return nil
508 }
509 }
510
511 return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind())
512 }
513
514 // Accumulate errors
515 errors := make([]string, 0)
516
517 for _, k := range dataVal.MapKeys() {
518 fieldName := fmt.Sprintf("%s[%s]", name, k)
519
520 // First decode the key into the proper type
521 currentKey := reflect.Indirect(reflect.New(valKeyType))
522 if err := d.decode(fieldName, k.Interface(), currentKey); err != nil {
523 errors = appendErrors(errors, err)
524 continue
525 }
526
527 // Next decode the data into the proper type
528 v := dataVal.MapIndex(k).Interface()
529 currentVal := reflect.Indirect(reflect.New(valElemType))
530 if err := d.decode(fieldName, v, currentVal); err != nil {
531 errors = appendErrors(errors, err)
532 continue
533 }
534
535 valMap.SetMapIndex(currentKey, currentVal)
536 }
537
538 // Set the built up map to the value
539 val.Set(valMap)
540
541 // If we had errors, return those
542 if len(errors) > 0 {
543 return &Error{errors}
544 }
545
546 return nil
547}
548
549func (d *Decoder) decodePtr(name string, data interface{}, val reflect.Value) error {
550 // Create an element of the concrete (non pointer) type and decode
551 // into that. Then set the value of the pointer to this type.
552 valType := val.Type()
553 valElemType := valType.Elem()
554
555 realVal := val
556 if realVal.IsNil() || d.config.ZeroFields {
557 realVal = reflect.New(valElemType)
558 }
559
560 if err := d.decode(name, data, reflect.Indirect(realVal)); err != nil {
561 return err
562 }
563
564 val.Set(realVal)
565 return nil
566}
567
568func (d *Decoder) decodeFunc(name string, data interface{}, val reflect.Value) error {
569 // Create an element of the concrete (non pointer) type and decode
570 // into that. Then set the value of the pointer to this type.
571 dataVal := reflect.Indirect(reflect.ValueOf(data))
572 if val.Type() != dataVal.Type() {
573 return fmt.Errorf(
574 "'%s' expected type '%s', got unconvertible type '%s'",
575 name, val.Type(), dataVal.Type())
576 }
577 val.Set(dataVal)
578 return nil
579}
580
581func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value) error {
582 dataVal := reflect.Indirect(reflect.ValueOf(data))
583 dataValKind := dataVal.Kind()
584 valType := val.Type()
585 valElemType := valType.Elem()
586 sliceType := reflect.SliceOf(valElemType)
587
588 valSlice := val
589 if valSlice.IsNil() || d.config.ZeroFields {
590 // Check input type
591 if dataValKind != reflect.Array && dataValKind != reflect.Slice {
592 if d.config.WeaklyTypedInput {
593 switch {
594 // Empty maps turn into empty slices
595 case dataValKind == reflect.Map:
596 if dataVal.Len() == 0 {
597 val.Set(reflect.MakeSlice(sliceType, 0, 0))
598 return nil
599 }
600
601 // All other types we try to convert to the slice type
602 // and "lift" into it, i.e. a string becomes a one-element string slice.
603 default:
604 // Just re-try this function with data as a slice.
605 return d.decodeSlice(name, []interface{}{data}, val)
606 }
607 }
608
609 return fmt.Errorf(
610 "'%s': source data must be an array or slice, got %s", name, dataValKind)
611
612 }
613
614 // Make a new slice to hold our result, same size as the original data.
615 valSlice = reflect.MakeSlice(sliceType, dataVal.Len(), dataVal.Len())
616 }
617
618 // Accumulate any errors
619 errors := make([]string, 0)
620
621 for i := 0; i < dataVal.Len(); i++ {
622 currentData := dataVal.Index(i).Interface()
623 for valSlice.Len() <= i {
624 valSlice = reflect.Append(valSlice, reflect.Zero(valElemType))
625 }
626 currentField := valSlice.Index(i)
627
628 fieldName := fmt.Sprintf("%s[%d]", name, i)
629 if err := d.decode(fieldName, currentData, currentField); err != nil {
630 errors = appendErrors(errors, err)
631 }
632 }
633
634 // Finally, set the value to the slice we built up
635 val.Set(valSlice)
636
637 // If there were errors, we return those
638 if len(errors) > 0 {
639 return &Error{errors}
640 }
641
642 return nil
643}
644
645func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) error {
646 dataVal := reflect.Indirect(reflect.ValueOf(data))
647
648 // If the type of the value to write to and the data match directly,
649 // then we just set it directly instead of recursing into the structure.
650 if dataVal.Type() == val.Type() {
651 val.Set(dataVal)
652 return nil
653 }
654
655 dataValKind := dataVal.Kind()
656 if dataValKind != reflect.Map {
657 return fmt.Errorf("'%s' expected a map, got '%s'", name, dataValKind)
658 }
659
660 dataValType := dataVal.Type()
661 if kind := dataValType.Key().Kind(); kind != reflect.String && kind != reflect.Interface {
662 return fmt.Errorf(
663 "'%s' needs a map with string keys, has '%s' keys",
664 name, dataValType.Key().Kind())
665 }
666
667 dataValKeys := make(map[reflect.Value]struct{})
668 dataValKeysUnused := make(map[interface{}]struct{})
669 for _, dataValKey := range dataVal.MapKeys() {
670 dataValKeys[dataValKey] = struct{}{}
671 dataValKeysUnused[dataValKey.Interface()] = struct{}{}
672 }
673
674 errors := make([]string, 0)
675
676 // This slice will keep track of all the structs we'll be decoding.
677 // There can be more than one struct if there are embedded structs
678 // that are squashed.
679 structs := make([]reflect.Value, 1, 5)
680 structs[0] = val
681
682 // Compile the list of all the fields that we're going to be decoding
683 // from all the structs.
684 fields := make(map[*reflect.StructField]reflect.Value)
685 for len(structs) > 0 {
686 structVal := structs[0]
687 structs = structs[1:]
688
689 structType := structVal.Type()
690
691 for i := 0; i < structType.NumField(); i++ {
692 fieldType := structType.Field(i)
693 fieldKind := fieldType.Type.Kind()
694
695 // If "squash" is specified in the tag, we squash the field down.
696 squash := false
697 tagParts := strings.Split(fieldType.Tag.Get(d.config.TagName), ",")
698 for _, tag := range tagParts[1:] {
699 if tag == "squash" {
700 squash = true
701 break
702 }
703 }
704
705 if squash {
706 if fieldKind != reflect.Struct {
707 errors = appendErrors(errors,
708 fmt.Errorf("%s: unsupported type for squash: %s", fieldType.Name, fieldKind))
709 } else {
710 structs = append(structs, structVal.FieldByName(fieldType.Name))
711 }
712 continue
713 }
714
715 // Normal struct field, store it away
716 fields[&fieldType] = structVal.Field(i)
717 }
718 }
719
720 for fieldType, field := range fields {
721 fieldName := fieldType.Name
722
723 tagValue := fieldType.Tag.Get(d.config.TagName)
724 tagValue = strings.SplitN(tagValue, ",", 2)[0]
725 if tagValue != "" {
726 fieldName = tagValue
727 }
728
729 rawMapKey := reflect.ValueOf(fieldName)
730 rawMapVal := dataVal.MapIndex(rawMapKey)
731 if !rawMapVal.IsValid() {
732			// Do a slower search by iterating over each key and
733			// comparing it case-insensitively.
734 for dataValKey := range dataValKeys {
735 mK, ok := dataValKey.Interface().(string)
736 if !ok {
737 // Not a string key
738 continue
739 }
740
741 if strings.EqualFold(mK, fieldName) {
742 rawMapKey = dataValKey
743 rawMapVal = dataVal.MapIndex(dataValKey)
744 break
745 }
746 }
747
748 if !rawMapVal.IsValid() {
749 // There was no matching key in the map for the value in
750 // the struct. Just ignore.
751 continue
752 }
753 }
754
755 // Delete the key we're using from the unused map so we stop tracking
756 delete(dataValKeysUnused, rawMapKey.Interface())
757
758 if !field.IsValid() {
759 // This should never happen
760 panic("field is not valid")
761 }
762
763 // If we can't set the field, then it is unexported or something,
764 // and we just continue onwards.
765 if !field.CanSet() {
766 continue
767 }
768
769 // If the name is empty string, then we're at the root, and we
770 // don't dot-join the fields.
771 if name != "" {
772 fieldName = fmt.Sprintf("%s.%s", name, fieldName)
773 }
774
775 if err := d.decode(fieldName, rawMapVal.Interface(), field); err != nil {
776 errors = appendErrors(errors, err)
777 }
778 }
779
780 if d.config.ErrorUnused && len(dataValKeysUnused) > 0 {
781 keys := make([]string, 0, len(dataValKeysUnused))
782 for rawKey := range dataValKeysUnused {
783 keys = append(keys, rawKey.(string))
784 }
785 sort.Strings(keys)
786
787 err := fmt.Errorf("'%s' has invalid keys: %s", name, strings.Join(keys, ", "))
788 errors = appendErrors(errors, err)
789 }
790
791 if len(errors) > 0 {
792 return &Error{errors}
793 }
794
795 // Add the unused keys to the list of unused keys if we're tracking metadata
796 if d.config.Metadata != nil {
797 for rawKey := range dataValKeysUnused {
798 key := rawKey.(string)
799 if name != "" {
800 key = fmt.Sprintf("%s.%s", name, key)
801 }
802
803 d.config.Metadata.Unused = append(d.config.Metadata.Unused, key)
804 }
805 }
806
807 return nil
808}
809
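// Illustrative sketch (not part of the upstream file): how the "squash"
// handling and the case-insensitive key fallback in decodeStruct combine.
// The types below are hypothetical; the tag name assumes the package's
// default TagName of "mapstructure".
//
//	type Person struct {
//		Name string
//	}
//	type Friend struct {
//		Person `mapstructure:",squash"`
//	}
//
//	// map[string]interface{}{"NAME": "alice"} decodes into
//	// Friend.Person.Name: squash flattens the embedded struct, and the
//	// key is matched via the strings.EqualFold fallback.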
810func getKind(val reflect.Value) reflect.Kind {
811 kind := val.Kind()
812
813 switch {
814 case kind >= reflect.Int && kind <= reflect.Int64:
815 return reflect.Int
816 case kind >= reflect.Uint && kind <= reflect.Uint64:
817 return reflect.Uint
818 case kind >= reflect.Float32 && kind <= reflect.Float64:
819 return reflect.Float32
820 default:
821 return kind
822 }
823}
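// Illustrative note (not part of the upstream file): getKind collapses
// all sized variants onto one representative kind, so the decoder's
// switch needs only one case per numeric family. Note that every float
// kind maps to reflect.Float32.
//
//	getKind(reflect.ValueOf(int8(1)))    // reflect.Int
//	getKind(reflect.ValueOf(uint64(1)))  // reflect.Uint
//	getKind(reflect.ValueOf(float64(1))) // reflect.Float32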
diff --git a/vendor/github.com/mitchellh/reflectwalk/LICENSE b/vendor/github.com/mitchellh/reflectwalk/LICENSE
new file mode 100644
index 0000000..f9c841a
--- /dev/null
+++ b/vendor/github.com/mitchellh/reflectwalk/LICENSE
@@ -0,0 +1,21 @@
1The MIT License (MIT)
2
3Copyright (c) 2013 Mitchell Hashimoto
4
5Permission is hereby granted, free of charge, to any person obtaining a copy
6of this software and associated documentation files (the "Software"), to deal
7in the Software without restriction, including without limitation the rights
8to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9copies of the Software, and to permit persons to whom the Software is
10furnished to do so, subject to the following conditions:
11
12The above copyright notice and this permission notice shall be included in
13all copies or substantial portions of the Software.
14
15THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
21THE SOFTWARE.
diff --git a/vendor/github.com/mitchellh/reflectwalk/README.md b/vendor/github.com/mitchellh/reflectwalk/README.md
new file mode 100644
index 0000000..ac82cd2
--- /dev/null
+++ b/vendor/github.com/mitchellh/reflectwalk/README.md
@@ -0,0 +1,6 @@
1# reflectwalk
2
3reflectwalk is a Go library for "walking" a value in Go using reflection,
4in the same way a directory tree can be "walked" on the filesystem. Walking
5a complex structure lets you manipulate unknown structures, such as those
6decoded from JSON.
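
A minimal usage sketch (illustrative, assuming only the `Walk` function and
`PrimitiveWalker` interface defined in this package): a walker that collects
every string primitive it visits. Note that `Primitive` may receive the
enclosing interface `reflect.Value` (so callers can set it), so the collector
unwraps one level first.

```go
package main

import (
	"fmt"
	"reflect"

	"github.com/mitchellh/reflectwalk"
)

// stringCollector implements reflectwalk.PrimitiveWalker and records
// every string primitive (map keys included) seen during the walk.
type stringCollector struct {
	strings []string
}

func (c *stringCollector) Primitive(v reflect.Value) error {
	// Map values of type interface{} arrive as interface Values;
	// unwrap them before inspecting the kind.
	if v.Kind() == reflect.Interface {
		v = v.Elem()
	}
	if v.Kind() == reflect.String {
		c.strings = append(c.strings, v.String())
	}
	return nil
}

func main() {
	data := map[string]interface{}{
		"name": "alice",
		"tags": []interface{}{"a", "b"},
	}
	var c stringCollector
	if err := reflectwalk.Walk(data, &c); err != nil {
		panic(err)
	}
	fmt.Println(c.strings) // e.g. [name alice tags a b]; map order varies
}
```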
diff --git a/vendor/github.com/mitchellh/reflectwalk/location.go b/vendor/github.com/mitchellh/reflectwalk/location.go
new file mode 100644
index 0000000..7c59d76
--- /dev/null
+++ b/vendor/github.com/mitchellh/reflectwalk/location.go
@@ -0,0 +1,17 @@
1package reflectwalk
2
3//go:generate stringer -type=Location location.go
4
5type Location uint
6
7const (
8 None Location = iota
9 Map
10 MapKey
11 MapValue
12 Slice
13 SliceElem
14 Struct
15 StructField
16 WalkLoc
17)
diff --git a/vendor/github.com/mitchellh/reflectwalk/location_string.go b/vendor/github.com/mitchellh/reflectwalk/location_string.go
new file mode 100644
index 0000000..d3cfe85
--- /dev/null
+++ b/vendor/github.com/mitchellh/reflectwalk/location_string.go
@@ -0,0 +1,16 @@
1// generated by stringer -type=Location location.go; DO NOT EDIT
2
3package reflectwalk
4
5import "fmt"
6
7const _Location_name = "NoneMapMapKeyMapValueSliceSliceElemStructStructFieldWalkLoc"
8
9var _Location_index = [...]uint8{0, 4, 7, 13, 21, 26, 35, 41, 52, 59}
10
11func (i Location) String() string {
12 if i+1 >= Location(len(_Location_index)) {
13 return fmt.Sprintf("Location(%d)", i)
14 }
15 return _Location_name[_Location_index[i]:_Location_index[i+1]]
16}
diff --git a/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go b/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go
new file mode 100644
index 0000000..ec0a623
--- /dev/null
+++ b/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go
@@ -0,0 +1,339 @@
1// Package reflectwalk allows you to "walk" complex structures in the same
2// way a directory tree can be "walked" on the filesystem: visiting every
3// element one by one and calling callback functions that let you handle
4// and manipulate those elements.
5package reflectwalk
6
7import (
8 "errors"
9 "reflect"
10)
11
12// PrimitiveWalker implementations are able to handle primitive values
13// within complex structures. Primitive values are numbers, strings,
14// booleans, funcs, and chans.
15//
16// These primitive values are often members of more complex
17// structures (slices, maps, etc.) that are walkable by other interfaces.
18type PrimitiveWalker interface {
19 Primitive(reflect.Value) error
20}
21
22// InterfaceWalker implementations are able to handle interface values as they
23// are encountered during the walk.
24type InterfaceWalker interface {
25 Interface(reflect.Value) error
26}
27
28// MapWalker implementations are able to handle individual elements
29// found within a map structure.
30type MapWalker interface {
31 Map(m reflect.Value) error
32 MapElem(m, k, v reflect.Value) error
33}
34
35// SliceWalker implementations are able to handle slice elements found
36// within complex structures.
37type SliceWalker interface {
38 Slice(reflect.Value) error
39 SliceElem(int, reflect.Value) error
40}
41
42// StructWalker is an interface whose methods are called for
43// structs during a Walk.
44type StructWalker interface {
45 Struct(reflect.Value) error
46 StructField(reflect.StructField, reflect.Value) error
47}
48
49// EnterExitWalker implementations are notified before and after
50// they walk deeper into complex structures (into struct fields,
51// into slice elements, etc.)
52type EnterExitWalker interface {
53 Enter(Location) error
54 Exit(Location) error
55}
56
57// PointerWalker implementations are notified when the value they're
58// walking is a pointer or not. PointerEnter and PointerExit are called
59// for _every_ value, whether it is a pointer or not.
60type PointerWalker interface {
61 PointerEnter(bool) error
62 PointerExit(bool) error
63}
64
65// SkipEntry can be returned from walk functions to skip walking
66// the value of this field. This is only valid in the following functions:
67//
68// - StructField: skips walking the struct value
69//
70var SkipEntry = errors.New("skip this entry")
71
72// Walk takes an arbitrary value and an interface and traverses the
73// value, calling callbacks on the interface if they are supported.
74// The interface should implement one or more of the walker interfaces
75// in this package, such as PrimitiveWalker, StructWalker, etc.
76func Walk(data, walker interface{}) (err error) {
77 v := reflect.ValueOf(data)
78 ew, ok := walker.(EnterExitWalker)
79 if ok {
80 err = ew.Enter(WalkLoc)
81 }
82
83 if err == nil {
84 err = walk(v, walker)
85 }
86
87 if ok && err == nil {
88 err = ew.Exit(WalkLoc)
89 }
90
91 return
92}
93
94func walk(v reflect.Value, w interface{}) (err error) {
95 // Determine if we're receiving a pointer and if so notify the walker.
96 // The logic here is convoluted but very important (tests will fail if
97 // almost any part is changed). I will try to explain here.
98 //
99 // First, we check if the value is an interface, if so, we really need
100 // to check the interface's VALUE to see whether it is a pointer.
101 //
102 // Check whether the value is then a pointer. If so, then set pointer
103 // to true to notify the user.
104 //
105 // If we still have a pointer or an interface after the indirections, then
106 // we unwrap another level
107 //
108 // At this time, we also set "v" to be the dereferenced value. This is
109 // because once we've unwrapped the pointer we want to use that value.
110 pointer := false
111 pointerV := v
112
113 for {
114 if pointerV.Kind() == reflect.Interface {
115 if iw, ok := w.(InterfaceWalker); ok {
116 if err = iw.Interface(pointerV); err != nil {
117 return
118 }
119 }
120
121 pointerV = pointerV.Elem()
122 }
123
124 if pointerV.Kind() == reflect.Ptr {
125 pointer = true
126 v = reflect.Indirect(pointerV)
127 }
128 if pw, ok := w.(PointerWalker); ok {
129 if err = pw.PointerEnter(pointer); err != nil {
130 return
131 }
132
133 defer func(pointer bool) {
134 if err != nil {
135 return
136 }
137
138 err = pw.PointerExit(pointer)
139 }(pointer)
140 }
141
142 if pointer {
143 pointerV = v
144 }
145 pointer = false
146
147 // If we still have a pointer or interface we have to indirect another level.
148 switch pointerV.Kind() {
149 case reflect.Ptr, reflect.Interface:
150 continue
151 }
152 break
153 }
154
155 // We preserve the original value here because if it is an interface
156 // type, we want to pass that directly into the walkPrimitive, so that
157 // we can set it.
158 originalV := v
159 if v.Kind() == reflect.Interface {
160 v = v.Elem()
161 }
162
163 k := v.Kind()
164 if k >= reflect.Int && k <= reflect.Complex128 {
165 k = reflect.Int
166 }
167
168 switch k {
169 // Primitives
170 case reflect.Bool, reflect.Chan, reflect.Func, reflect.Int, reflect.String, reflect.Invalid:
171 err = walkPrimitive(originalV, w)
172 return
173 case reflect.Map:
174 err = walkMap(v, w)
175 return
176 case reflect.Slice:
177 err = walkSlice(v, w)
178 return
179 case reflect.Struct:
180 err = walkStruct(v, w)
181 return
182 default:
183 panic("unsupported type: " + k.String())
184 }
185}
186
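// Illustrative sketch (not part of the upstream file): an EnterExitWalker
// that records the Location sequence produced by the dispatch in walk
// above. Useful for seeing how Map/MapKey/MapValue and friends nest.
type locationTracer struct {
	trace []Location
}

func (t *locationTracer) Enter(loc Location) error {
	t.trace = append(t.trace, loc)
	return nil
}

func (t *locationTracer) Exit(Location) error { return nil }
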
187func walkMap(v reflect.Value, w interface{}) error {
188 ew, ewok := w.(EnterExitWalker)
189 if ewok {
190 ew.Enter(Map)
191 }
192
193 if mw, ok := w.(MapWalker); ok {
194 if err := mw.Map(v); err != nil {
195 return err
196 }
197 }
198
199 for _, k := range v.MapKeys() {
200 kv := v.MapIndex(k)
201
202 if mw, ok := w.(MapWalker); ok {
203 if err := mw.MapElem(v, k, kv); err != nil {
204 return err
205 }
206 }
207
208 ew, ok := w.(EnterExitWalker)
209 if ok {
210 ew.Enter(MapKey)
211 }
212
213 if err := walk(k, w); err != nil {
214 return err
215 }
216
217 if ok {
218 ew.Exit(MapKey)
219 ew.Enter(MapValue)
220 }
221
222 if err := walk(kv, w); err != nil {
223 return err
224 }
225
226 if ok {
227 ew.Exit(MapValue)
228 }
229 }
230
231 if ewok {
232 ew.Exit(Map)
233 }
234
235 return nil
236}
237
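// Illustrative sketch (not part of the upstream file): a MapWalker that
// counts entries as walkMap above dispatches them via MapElem.
type mapCounter struct {
	entries int
}

func (m *mapCounter) Map(reflect.Value) error { return nil }

func (m *mapCounter) MapElem(_, _, _ reflect.Value) error {
	m.entries++
	return nil
}
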
238func walkPrimitive(v reflect.Value, w interface{}) error {
239 if pw, ok := w.(PrimitiveWalker); ok {
240 return pw.Primitive(v)
241 }
242
243 return nil
244}
245
246func walkSlice(v reflect.Value, w interface{}) (err error) {
247 ew, ok := w.(EnterExitWalker)
248 if ok {
249 ew.Enter(Slice)
250 }
251
252 if sw, ok := w.(SliceWalker); ok {
253 if err := sw.Slice(v); err != nil {
254 return err
255 }
256 }
257
258 for i := 0; i < v.Len(); i++ {
259 elem := v.Index(i)
260
261 if sw, ok := w.(SliceWalker); ok {
262 if err := sw.SliceElem(i, elem); err != nil {
263 return err
264 }
265 }
266
267 ew, ok := w.(EnterExitWalker)
268 if ok {
269 ew.Enter(SliceElem)
270 }
271
272 if err := walk(elem, w); err != nil {
273 return err
274 }
275
276 if ok {
277 ew.Exit(SliceElem)
278 }
279 }
280
281 ew, ok = w.(EnterExitWalker)
282 if ok {
283 ew.Exit(Slice)
284 }
285
286 return nil
287}
288
289func walkStruct(v reflect.Value, w interface{}) (err error) {
290 ew, ewok := w.(EnterExitWalker)
291 if ewok {
292 ew.Enter(Struct)
293 }
294
295 if sw, ok := w.(StructWalker); ok {
296 if err = sw.Struct(v); err != nil {
297 return
298 }
299 }
300
301 vt := v.Type()
302 for i := 0; i < vt.NumField(); i++ {
303 sf := vt.Field(i)
304 f := v.FieldByIndex([]int{i})
305
306 if sw, ok := w.(StructWalker); ok {
307 err = sw.StructField(sf, f)
308
309 // SkipEntry just pretends this field doesn't even exist
310 if err == SkipEntry {
311 continue
312 }
313
314 if err != nil {
315 return
316 }
317 }
318
319 ew, ok := w.(EnterExitWalker)
320 if ok {
321 ew.Enter(StructField)
322 }
323
324 err = walk(f, w)
325 if err != nil {
326 return
327 }
328
329 if ok {
330 ew.Exit(StructField)
331 }
332 }
333
334 if ewok {
335 ew.Exit(Struct)
336 }
337
338 return nil
339}
diff --git a/vendor/github.com/satori/go.uuid/LICENSE b/vendor/github.com/satori/go.uuid/LICENSE
new file mode 100644
index 0000000..488357b
--- /dev/null
+++ b/vendor/github.com/satori/go.uuid/LICENSE
@@ -0,0 +1,20 @@
1Copyright (C) 2013-2016 by Maxim Bublis <b@codemonkey.ru>
2
3Permission is hereby granted, free of charge, to any person obtaining
4a copy of this software and associated documentation files (the
5"Software"), to deal in the Software without restriction, including
6without limitation the rights to use, copy, modify, merge, publish,
7distribute, sublicense, and/or sell copies of the Software, and to
8permit persons to whom the Software is furnished to do so, subject to
9the following conditions:
10
11The above copyright notice and this permission notice shall be
12included in all copies or substantial portions of the Software.
13
14THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
15EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
16MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
17NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
18LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
19OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
20WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/satori/go.uuid/README.md b/vendor/github.com/satori/go.uuid/README.md
new file mode 100644
index 0000000..b6aad1c
--- /dev/null
+++ b/vendor/github.com/satori/go.uuid/README.md
@@ -0,0 +1,65 @@
1# UUID package for Go language
2
3[![Build Status](https://travis-ci.org/satori/go.uuid.png?branch=master)](https://travis-ci.org/satori/go.uuid)
4[![Coverage Status](https://coveralls.io/repos/github/satori/go.uuid/badge.svg?branch=master)](https://coveralls.io/github/satori/go.uuid)
5[![GoDoc](http://godoc.org/github.com/satori/go.uuid?status.png)](http://godoc.org/github.com/satori/go.uuid)
6
7This package provides a pure Go implementation of the Universally Unique Identifier (UUID). Both creation and parsing of UUIDs are supported.
8
9It comes with 100% test coverage and benchmarks out of the box.
10
11Supported versions:
12* Version 1, based on timestamp and MAC address (RFC 4122)
13* Version 2, based on timestamp, MAC address and POSIX UID/GID (DCE 1.1)
14* Version 3, based on MD5 hashing (RFC 4122)
15* Version 4, based on random numbers (RFC 4122)
16* Version 5, based on SHA-1 hashing (RFC 4122)
17
18## Installation
19
20Use the `go` command:
21
22 $ go get github.com/satori/go.uuid
23
24## Requirements
25
26The UUID package requires Go >= 1.2.
27
28## Example
29
30```go
31package main
32
33import (
34 "fmt"
35 "github.com/satori/go.uuid"
36)
37
38func main() {
39 // Creating UUID Version 4
40 u1 := uuid.NewV4()
41 fmt.Printf("UUIDv4: %s\n", u1)
42
43 // Parsing UUID from string input
44 u2, err := uuid.FromString("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
45 if err != nil {
46		fmt.Printf("Something went wrong: %s", err)
47 }
48 fmt.Printf("Successfully parsed: %s", u2)
49}
50```
51
52## Documentation
53
54[Documentation](http://godoc.org/github.com/satori/go.uuid) is hosted at GoDoc project.
55
56## Links
57* [RFC 4122](http://tools.ietf.org/html/rfc4122)
58* [DCE 1.1: Authentication and Security Services](http://pubs.opengroup.org/onlinepubs/9696989899/chap5.htm#tagcjh_08_02_01_01)
59
60## Copyright
61
62Copyright (C) 2013-2016 by Maxim Bublis <b@codemonkey.ru>.
63
64UUID package released under MIT License.
65See [LICENSE](https://github.com/satori/go.uuid/blob/master/LICENSE) for details.
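
## Name-based UUIDs

Name-based UUIDs (versions 3 and 5) are deterministic: hashing the same
namespace and name always yields the same UUID. A short sketch using the
predefined DNS namespace:

```go
package main

import (
	"fmt"
	"github.com/satori/go.uuid"
)

func main() {
	// The same namespace + name always produces the same UUID.
	u3 := uuid.NewV3(uuid.NamespaceDNS, "example.com")
	u5 := uuid.NewV5(uuid.NamespaceDNS, "example.com")
	fmt.Printf("UUIDv3: %s\nUUIDv5: %s\n", u3, u5)
}
```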
diff --git a/vendor/github.com/satori/go.uuid/uuid.go b/vendor/github.com/satori/go.uuid/uuid.go
new file mode 100644
index 0000000..295f3fc
--- /dev/null
+++ b/vendor/github.com/satori/go.uuid/uuid.go
@@ -0,0 +1,481 @@
1// Copyright (C) 2013-2015 by Maxim Bublis <b@codemonkey.ru>
2//
3// Permission is hereby granted, free of charge, to any person obtaining
4// a copy of this software and associated documentation files (the
5// "Software"), to deal in the Software without restriction, including
6// without limitation the rights to use, copy, modify, merge, publish,
7// distribute, sublicense, and/or sell copies of the Software, and to
8// permit persons to whom the Software is furnished to do so, subject to
9// the following conditions:
10//
11// The above copyright notice and this permission notice shall be
12// included in all copies or substantial portions of the Software.
13//
14// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
15// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
16// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
17// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
18// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
19// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
20// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
21
22// Package uuid provides an implementation of the Universally Unique
23// Identifier (UUID). Supported versions are 1, 3, 4 and 5 (as specified
24// in RFC 4122) and version 2 (as specified in DCE 1.1).
25package uuid
26
27import (
28 "bytes"
29 "crypto/md5"
30 "crypto/rand"
31 "crypto/sha1"
32 "database/sql/driver"
33 "encoding/binary"
34 "encoding/hex"
35 "fmt"
36 "hash"
37 "net"
38 "os"
39 "sync"
40 "time"
41)
42
43// UUID layout variants.
44const (
45 VariantNCS = iota
46 VariantRFC4122
47 VariantMicrosoft
48 VariantFuture
49)
50
51// UUID DCE domains.
52const (
53 DomainPerson = iota
54 DomainGroup
55 DomainOrg
56)
57
58// Difference in 100-nanosecond intervals between
59// UUID epoch (October 15, 1582) and Unix epoch (January 1, 1970).
60const epochStart = 122192928000000000
61
62// dash is used in the String method conversion.
63const dash byte = '-'
64
65// UUID v1/v2 storage.
66var (
67 storageMutex sync.Mutex
68 storageOnce sync.Once
69 epochFunc = unixTimeFunc
70 clockSequence uint16
71 lastTime uint64
72 hardwareAddr [6]byte
73 posixUID = uint32(os.Getuid())
74 posixGID = uint32(os.Getgid())
75)
76
77// String parse helpers.
78var (
79 urnPrefix = []byte("urn:uuid:")
80 byteGroups = []int{8, 4, 4, 4, 12}
81)
82
83func initClockSequence() {
84 buf := make([]byte, 2)
85 safeRandom(buf)
86 clockSequence = binary.BigEndian.Uint16(buf)
87}
88
89func initHardwareAddr() {
90 interfaces, err := net.Interfaces()
91 if err == nil {
92 for _, iface := range interfaces {
93 if len(iface.HardwareAddr) >= 6 {
94 copy(hardwareAddr[:], iface.HardwareAddr)
95 return
96 }
97 }
98 }
99
100	// Initialize hardwareAddr randomly in case
101	// no real network interface is present.
102 safeRandom(hardwareAddr[:])
103
104 // Set multicast bit as recommended in RFC 4122
105 hardwareAddr[0] |= 0x01
106}
107
108func initStorage() {
109 initClockSequence()
110 initHardwareAddr()
111}
112
113func safeRandom(dest []byte) {
114 if _, err := rand.Read(dest); err != nil {
115 panic(err)
116 }
117}
118
119// unixTimeFunc returns the difference, in 100-nanosecond intervals,
120// between the UUID epoch (October 15, 1582) and the current time.
121// It is the default epoch calculation function.
122func unixTimeFunc() uint64 {
123 return epochStart + uint64(time.Now().UnixNano()/100)
124}
125
126// UUID representation compliant with specification
127// described in RFC 4122.
128type UUID [16]byte
129
130// NullUUID can be used with the standard sql package to represent a
131// UUID value that can be NULL in the database
132type NullUUID struct {
133 UUID UUID
134 Valid bool
135}
136
137// Nil is the special form of UUID that is specified to have all
138// 128 bits set to zero.
139var Nil = UUID{}
140
141// Predefined namespace UUIDs.
142var (
143 NamespaceDNS, _ = FromString("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
144 NamespaceURL, _ = FromString("6ba7b811-9dad-11d1-80b4-00c04fd430c8")
145 NamespaceOID, _ = FromString("6ba7b812-9dad-11d1-80b4-00c04fd430c8")
146 NamespaceX500, _ = FromString("6ba7b814-9dad-11d1-80b4-00c04fd430c8")
147)
148
149// And returns result of binary AND of two UUIDs.
150func And(u1 UUID, u2 UUID) UUID {
151 u := UUID{}
152 for i := 0; i < 16; i++ {
153 u[i] = u1[i] & u2[i]
154 }
155 return u
156}
157
158// Or returns result of binary OR of two UUIDs.
159func Or(u1 UUID, u2 UUID) UUID {
160 u := UUID{}
161 for i := 0; i < 16; i++ {
162 u[i] = u1[i] | u2[i]
163 }
164 return u
165}
166
167// Equal returns true if u1 and u2 are equal, otherwise it returns false.
168func Equal(u1 UUID, u2 UUID) bool {
169 return bytes.Equal(u1[:], u2[:])
170}
171
172// Version returns the algorithm version used to generate the UUID.
173func (u UUID) Version() uint {
174 return uint(u[6] >> 4)
175}
176
177// Variant returns the UUID layout variant.
178func (u UUID) Variant() uint {
179 switch {
180 case (u[8] & 0x80) == 0x00:
181 return VariantNCS
182 case (u[8]&0xc0)|0x80 == 0x80:
183 return VariantRFC4122
184 case (u[8]&0xe0)|0xc0 == 0xc0:
185 return VariantMicrosoft
186 }
187 return VariantFuture
188}
189
190// Bytes returns a byte slice representation of the UUID.
191func (u UUID) Bytes() []byte {
192 return u[:]
193}
194
195// String returns the canonical string representation of the UUID:
196// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx.
197func (u UUID) String() string {
198 buf := make([]byte, 36)
199
200 hex.Encode(buf[0:8], u[0:4])
201 buf[8] = dash
202 hex.Encode(buf[9:13], u[4:6])
203 buf[13] = dash
204 hex.Encode(buf[14:18], u[6:8])
205 buf[18] = dash
206 hex.Encode(buf[19:23], u[8:10])
207 buf[23] = dash
208 hex.Encode(buf[24:], u[10:])
209
210 return string(buf)
211}
212
213// SetVersion sets version bits.
214func (u *UUID) SetVersion(v byte) {
215 u[6] = (u[6] & 0x0f) | (v << 4)
216}
217
218// SetVariant sets variant bits as described in RFC 4122.
219func (u *UUID) SetVariant() {
220 u[8] = (u[8] & 0xbf) | 0x80
221}
222
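// Illustrative sketch (not part of the upstream file): SetVersion and
// SetVariant only touch the high bits of u[6] and u[8], so the accessors
// round-trip.
//
//	var u UUID
//	u.SetVersion(4)
//	u.SetVariant()
//	u.Version() // 4
//	u.Variant() // VariantRFC4122
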
223// MarshalText implements the encoding.TextMarshaler interface.
224// The encoding is the same as returned by String.
225func (u UUID) MarshalText() (text []byte, err error) {
226 text = []byte(u.String())
227 return
228}
229
230// UnmarshalText implements the encoding.TextUnmarshaler interface.
231// The following formats are supported:
232// "6ba7b810-9dad-11d1-80b4-00c04fd430c8",
233// "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}",
234// "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8"
235func (u *UUID) UnmarshalText(text []byte) (err error) {
236 if len(text) < 32 {
237 err = fmt.Errorf("uuid: UUID string too short: %s", text)
238 return
239 }
240
241 t := text[:]
242 braced := false
243
244 if bytes.Equal(t[:9], urnPrefix) {
245 t = t[9:]
246 } else if t[0] == '{' {
247 braced = true
248 t = t[1:]
249 }
250
251 b := u[:]
252
253 for i, byteGroup := range byteGroups {
254 if i > 0 {
255 if t[0] != '-' {
256 err = fmt.Errorf("uuid: invalid string format")
257 return
258 }
259 t = t[1:]
260 }
261
262 if len(t) < byteGroup {
263 err = fmt.Errorf("uuid: UUID string too short: %s", text)
264 return
265 }
266
267 if i == 4 && len(t) > byteGroup &&
268 ((braced && t[byteGroup] != '}') || len(t[byteGroup:]) > 1 || !braced) {
269 err = fmt.Errorf("uuid: UUID string too long: %s", text)
270 return
271 }
272
273 _, err = hex.Decode(b[:byteGroup/2], t[:byteGroup])
274 if err != nil {
275 return
276 }
277
278 t = t[byteGroup:]
279 b = b[byteGroup/2:]
280 }
281
282 return
283}
284
285// MarshalBinary implements the encoding.BinaryMarshaler interface.
286func (u UUID) MarshalBinary() (data []byte, err error) {
287 data = u.Bytes()
288 return
289}
290
291// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
292// It will return an error if the slice isn't 16 bytes long.
293func (u *UUID) UnmarshalBinary(data []byte) (err error) {
294 if len(data) != 16 {
295 err = fmt.Errorf("uuid: UUID must be exactly 16 bytes long, got %d bytes", len(data))
296 return
297 }
298 copy(u[:], data)
299
300 return
301}
302
303// Value implements the driver.Valuer interface.
304func (u UUID) Value() (driver.Value, error) {
305 return u.String(), nil
306}
307
308// Scan implements the sql.Scanner interface.
309// A 16-byte slice is handled by UnmarshalBinary, while
310// a longer byte slice or a string is handled by UnmarshalText.
311func (u *UUID) Scan(src interface{}) error {
312 switch src := src.(type) {
313 case []byte:
314 if len(src) == 16 {
315 return u.UnmarshalBinary(src)
316 }
317 return u.UnmarshalText(src)
318
319 case string:
320 return u.UnmarshalText([]byte(src))
321 }
322
323 return fmt.Errorf("uuid: cannot convert %T to UUID", src)
324}
325
326// Value implements the driver.Valuer interface.
327func (u NullUUID) Value() (driver.Value, error) {
328 if !u.Valid {
329 return nil, nil
330 }
331 // Delegate to UUID Value function
332 return u.UUID.Value()
333}
334
335// Scan implements the sql.Scanner interface.
336func (u *NullUUID) Scan(src interface{}) error {
337 if src == nil {
338 u.UUID, u.Valid = Nil, false
339 return nil
340 }
341
342 // Delegate to UUID Scan function
343 u.Valid = true
344 return u.UUID.Scan(src)
345}
346
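// Illustrative usage sketch (not part of the upstream file), assuming a
// hypothetical *sql.DB and a "users" table with a nullable uuid column:
//
//	var id NullUUID
//	err := db.QueryRow(`SELECT uuid FROM users WHERE name = $1`, name).Scan(&id)
//	if err == nil && id.Valid {
//		fmt.Println(id.UUID)
//	}
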
347// FromBytes returns UUID converted from raw byte slice input.
348// It will return an error if the slice isn't 16 bytes long.
349func FromBytes(input []byte) (u UUID, err error) {
350 err = u.UnmarshalBinary(input)
351 return
352}
353
354// FromBytesOrNil returns UUID converted from raw byte slice input.
355// Same behavior as FromBytes, but returns a Nil UUID on error.
356func FromBytesOrNil(input []byte) UUID {
357 uuid, err := FromBytes(input)
358 if err != nil {
359 return Nil
360 }
361 return uuid
362}
363
364// FromString returns UUID parsed from string input.
365// Input is expected in a form accepted by UnmarshalText.
366func FromString(input string) (u UUID, err error) {
367 err = u.UnmarshalText([]byte(input))
368 return
369}
370
371// FromStringOrNil returns UUID parsed from string input.
372// Same behavior as FromString, but returns a Nil UUID on error.
373func FromStringOrNil(input string) UUID {
374 uuid, err := FromString(input)
375 if err != nil {
376 return Nil
377 }
378 return uuid
379}
380
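// Illustrative usage sketch (not part of the upstream file): the *OrNil
// variants suit contexts where the zero UUID is an acceptable fallback,
// e.g. parsing an optional, hypothetical request header:
//
//	id := FromStringOrNil(req.Header.Get("X-Request-ID")) // Nil on bad input
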
381// getStorage returns the UUID v1/v2 storage state:
382// the epoch timestamp, clock sequence, and hardware address.
383func getStorage() (uint64, uint16, []byte) {
384 storageOnce.Do(initStorage)
385
386 storageMutex.Lock()
387 defer storageMutex.Unlock()
388
389 timeNow := epochFunc()
390	// The clock changed backwards since the last UUID generation;
391	// increase the clock sequence to avoid collisions.
392 if timeNow <= lastTime {
393 clockSequence++
394 }
395 lastTime = timeNow
396
397 return timeNow, clockSequence, hardwareAddr[:]
398}
399
400// NewV1 returns a UUID based on the current timestamp and MAC address.
401func NewV1() UUID {
402 u := UUID{}
403
404 timeNow, clockSeq, hardwareAddr := getStorage()
405
406 binary.BigEndian.PutUint32(u[0:], uint32(timeNow))
407 binary.BigEndian.PutUint16(u[4:], uint16(timeNow>>32))
408 binary.BigEndian.PutUint16(u[6:], uint16(timeNow>>48))
409 binary.BigEndian.PutUint16(u[8:], clockSeq)
410
411 copy(u[10:], hardwareAddr)
412
413 u.SetVersion(1)
414 u.SetVariant()
415
416 return u
417}
418
419// NewV2 returns a DCE Security UUID based on the POSIX UID/GID.
420func NewV2(domain byte) UUID {
421 u := UUID{}
422
423 timeNow, clockSeq, hardwareAddr := getStorage()
424
425 switch domain {
426 case DomainPerson:
427 binary.BigEndian.PutUint32(u[0:], posixUID)
428 case DomainGroup:
429 binary.BigEndian.PutUint32(u[0:], posixGID)
430 }
431
432 binary.BigEndian.PutUint16(u[4:], uint16(timeNow>>32))
433 binary.BigEndian.PutUint16(u[6:], uint16(timeNow>>48))
434 binary.BigEndian.PutUint16(u[8:], clockSeq)
435 u[9] = domain
436
437 copy(u[10:], hardwareAddr)
438
439 u.SetVersion(2)
440 u.SetVariant()
441
442 return u
443}
444
445// NewV3 returns a UUID based on the MD5 hash of the namespace UUID and name.
446func NewV3(ns UUID, name string) UUID {
447 u := newFromHash(md5.New(), ns, name)
448 u.SetVersion(3)
449 u.SetVariant()
450
451 return u
452}
453
454// NewV4 returns a randomly generated UUID.
455func NewV4() UUID {
456 u := UUID{}
457 safeRandom(u[:])
458 u.SetVersion(4)
459 u.SetVariant()
460
461 return u
462}
463
464// NewV5 returns a UUID based on the SHA-1 hash of the namespace UUID and name.
465func NewV5(ns UUID, name string) UUID {
466 u := newFromHash(sha1.New(), ns, name)
467 u.SetVersion(5)
468 u.SetVariant()
469
470 return u
471}
472
473// newFromHash returns a UUID based on hashing the namespace UUID and name.
474func newFromHash(h hash.Hash, ns UUID, name string) UUID {
475 u := UUID{}
476 h.Write(ns[:])
477 h.Write([]byte(name))
478 copy(u[:], h.Sum(nil))
479
480 return u
481}
diff --git a/vendor/golang.org/x/crypto/LICENSE b/vendor/golang.org/x/crypto/LICENSE
new file mode 100644
index 0000000..6a66aea
--- /dev/null
+++ b/vendor/golang.org/x/crypto/LICENSE
@@ -0,0 +1,27 @@
1Copyright (c) 2009 The Go Authors. All rights reserved.
2
3Redistribution and use in source and binary forms, with or without
4modification, are permitted provided that the following conditions are
5met:
6
7 * Redistributions of source code must retain the above copyright
8notice, this list of conditions and the following disclaimer.
9 * Redistributions in binary form must reproduce the above
10copyright notice, this list of conditions and the following disclaimer
11in the documentation and/or other materials provided with the
12distribution.
13 * Neither the name of Google Inc. nor the names of its
14contributors may be used to endorse or promote products derived from
15this software without specific prior written permission.
16
17THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
20A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
21OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
23LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/golang.org/x/crypto/PATENTS b/vendor/golang.org/x/crypto/PATENTS
new file mode 100644
index 0000000..7330990
--- /dev/null
+++ b/vendor/golang.org/x/crypto/PATENTS
@@ -0,0 +1,22 @@
1Additional IP Rights Grant (Patents)
2
3"This implementation" means the copyrightable works distributed by
4Google as part of the Go project.
5
6Google hereby grants to You a perpetual, worldwide, non-exclusive,
7no-charge, royalty-free, irrevocable (except as stated in this section)
8patent license to make, have made, use, offer to sell, sell, import,
9transfer and otherwise run, modify and propagate the contents of this
10implementation of Go, where such license applies only to those patent
11claims, both currently owned or controlled by Google and acquired in
12the future, licensable by Google that are necessarily infringed by this
13implementation of Go. This grant does not include claims that would be
14infringed only as a consequence of further modification of this
15implementation. If you or your agent or exclusive licensee institute or
16order or agree to the institution of patent litigation against any
17entity (including a cross-claim or counterclaim in a lawsuit) alleging
18that this implementation of Go or any code incorporated within this
19implementation of Go constitutes direct or contributory patent
20infringement, or inducement of patent infringement, then any patent
21rights granted to you under this License for this implementation of Go
22shall terminate as of the date such litigation is filed.
diff --git a/vendor/golang.org/x/crypto/curve25519/const_amd64.h b/vendor/golang.org/x/crypto/curve25519/const_amd64.h
new file mode 100644
index 0000000..80ad222
--- /dev/null
+++ b/vendor/golang.org/x/crypto/curve25519/const_amd64.h
@@ -0,0 +1,8 @@
1// Copyright 2012 The Go Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5// This code was translated into a form compatible with 6a from the public
6// domain sources in SUPERCOP: http://bench.cr.yp.to/supercop.html
7
8#define REDMASK51 0x0007FFFFFFFFFFFF
diff --git a/vendor/golang.org/x/crypto/curve25519/const_amd64.s b/vendor/golang.org/x/crypto/curve25519/const_amd64.s
new file mode 100644
index 0000000..0ad5398
--- /dev/null
+++ b/vendor/golang.org/x/crypto/curve25519/const_amd64.s
@@ -0,0 +1,20 @@
1// Copyright 2012 The Go Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5// This code was translated into a form compatible with 6a from the public
6// domain sources in SUPERCOP: http://bench.cr.yp.to/supercop.html
7
8// +build amd64,!gccgo,!appengine
9
10// These constants cannot be encoded in non-MOVQ immediates.
11// We access them directly from memory instead.
12
13DATA ·_121666_213(SB)/8, $996687872
14GLOBL ·_121666_213(SB), 8, $8
15
16DATA ·_2P0(SB)/8, $0xFFFFFFFFFFFDA
17GLOBL ·_2P0(SB), 8, $8
18
19DATA ·_2P1234(SB)/8, $0xFFFFFFFFFFFFE
20GLOBL ·_2P1234(SB), 8, $8
diff --git a/vendor/golang.org/x/crypto/curve25519/cswap_amd64.s b/vendor/golang.org/x/crypto/curve25519/cswap_amd64.s
new file mode 100644
index 0000000..45484d1
--- /dev/null
+++ b/vendor/golang.org/x/crypto/curve25519/cswap_amd64.s
@@ -0,0 +1,88 @@
1// Copyright 2012 The Go Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5// This code was translated into a form compatible with 6a from the public
6// domain sources in SUPERCOP: http://bench.cr.yp.to/supercop.html
7
8// +build amd64,!gccgo,!appengine
9
10// func cswap(inout *[5]uint64, v uint64)
11TEXT ·cswap(SB),7,$0
12 MOVQ inout+0(FP),DI
13 MOVQ v+8(FP),SI
14
15 CMPQ SI,$1
16 MOVQ 0(DI),SI
17 MOVQ 80(DI),DX
18 MOVQ 8(DI),CX
19 MOVQ 88(DI),R8
20 MOVQ SI,R9
21 CMOVQEQ DX,SI
22 CMOVQEQ R9,DX
23 MOVQ CX,R9
24 CMOVQEQ R8,CX
25 CMOVQEQ R9,R8
26 MOVQ SI,0(DI)
27 MOVQ DX,80(DI)
28 MOVQ CX,8(DI)
29 MOVQ R8,88(DI)
30 MOVQ 16(DI),SI
31 MOVQ 96(DI),DX
32 MOVQ 24(DI),CX
33 MOVQ 104(DI),R8
34 MOVQ SI,R9
35 CMOVQEQ DX,SI
36 CMOVQEQ R9,DX
37 MOVQ CX,R9
38 CMOVQEQ R8,CX
39 CMOVQEQ R9,R8
40 MOVQ SI,16(DI)
41 MOVQ DX,96(DI)
42 MOVQ CX,24(DI)
43 MOVQ R8,104(DI)
44 MOVQ 32(DI),SI
45 MOVQ 112(DI),DX
46 MOVQ 40(DI),CX
47 MOVQ 120(DI),R8
48 MOVQ SI,R9
49 CMOVQEQ DX,SI
50 CMOVQEQ R9,DX
51 MOVQ CX,R9
52 CMOVQEQ R8,CX
53 CMOVQEQ R9,R8
54 MOVQ SI,32(DI)
55 MOVQ DX,112(DI)
56 MOVQ CX,40(DI)
57 MOVQ R8,120(DI)
58 MOVQ 48(DI),SI
59 MOVQ 128(DI),DX
60 MOVQ 56(DI),CX
61 MOVQ 136(DI),R8
62 MOVQ SI,R9
63 CMOVQEQ DX,SI
64 CMOVQEQ R9,DX
65 MOVQ CX,R9
66 CMOVQEQ R8,CX
67 CMOVQEQ R9,R8
68 MOVQ SI,48(DI)
69 MOVQ DX,128(DI)
70 MOVQ CX,56(DI)
71 MOVQ R8,136(DI)
72 MOVQ 64(DI),SI
73 MOVQ 144(DI),DX
74 MOVQ 72(DI),CX
75 MOVQ 152(DI),R8
76 MOVQ SI,R9
77 CMOVQEQ DX,SI
78 CMOVQEQ R9,DX
79 MOVQ CX,R9
80 CMOVQEQ R8,CX
81 CMOVQEQ R9,R8
82 MOVQ SI,64(DI)
83 MOVQ DX,144(DI)
84 MOVQ CX,72(DI)
85 MOVQ R8,152(DI)
86 MOVQ DI,AX
87 MOVQ SI,DX
88 RET
diff --git a/vendor/golang.org/x/crypto/curve25519/curve25519.go b/vendor/golang.org/x/crypto/curve25519/curve25519.go
new file mode 100644
index 0000000..6918c47
--- /dev/null
+++ b/vendor/golang.org/x/crypto/curve25519/curve25519.go
@@ -0,0 +1,841 @@
1// Copyright 2013 The Go Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5// We have an implementation in amd64 assembly so this code is only run on
6// non-amd64 platforms. The amd64 assembly does not support gccgo.
7// +build !amd64 gccgo appengine
8
9package curve25519
10
11// This code is a port of the public domain, "ref10" implementation of
12// curve25519 from SUPERCOP 20130419 by D. J. Bernstein.
13
14// fieldElement represents an element of the field GF(2^255 - 19). An element
15// t, entries t[0]...t[9], represents the integer t[0]+2^26 t[1]+2^51 t[2]+2^77
16// t[3]+2^102 t[4]+...+2^230 t[9]. Bounds on each t[i] vary depending on
17// context.
18type fieldElement [10]int32
19
20func feZero(fe *fieldElement) {
21 for i := range fe {
22 fe[i] = 0
23 }
24}
25
26func feOne(fe *fieldElement) {
27 feZero(fe)
28 fe[0] = 1
29}
30
31func feAdd(dst, a, b *fieldElement) {
32 for i := range dst {
33 dst[i] = a[i] + b[i]
34 }
35}
36
37func feSub(dst, a, b *fieldElement) {
38 for i := range dst {
39 dst[i] = a[i] - b[i]
40 }
41}
42
43func feCopy(dst, src *fieldElement) {
44 for i := range dst {
45 dst[i] = src[i]
46 }
47}
48
49// feCSwap replaces (f,g) with (g,f) if b == 1; replaces (f,g) with (f,g) if b == 0.
50//
51// Preconditions: b in {0,1}.
52func feCSwap(f, g *fieldElement, b int32) {
53 var x fieldElement
54 b = -b
55 for i := range x {
56 x[i] = b & (f[i] ^ g[i])
57 }
58
59 for i := range f {
60 f[i] ^= x[i]
61 }
62 for i := range g {
63 g[i] ^= x[i]
64 }
65}
66
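// Worked note on feCSwap above (illustrative, not part of the upstream
// file): with b == 1, b = -b yields an all-ones mask, so
// x[i] = f[i] ^ g[i] and the two XOR loops swap f and g; with b == 0 the
// mask is zero and nothing moves. The memory accesses are identical
// either way, which keeps the swap constant-time.
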
67// load3 reads a 24-bit, little-endian value from in.
68func load3(in []byte) int64 {
69 var r int64
70 r = int64(in[0])
71 r |= int64(in[1]) << 8
72 r |= int64(in[2]) << 16
73 return r
74}
75
76// load4 reads a 32-bit, little-endian value from in.
77func load4(in []byte) int64 {
78 var r int64
79 r = int64(in[0])
80 r |= int64(in[1]) << 8
81 r |= int64(in[2]) << 16
82 r |= int64(in[3]) << 24
83 return r
84}
85
86func feFromBytes(dst *fieldElement, src *[32]byte) {
87 h0 := load4(src[:])
88 h1 := load3(src[4:]) << 6
89 h2 := load3(src[7:]) << 5
90 h3 := load3(src[10:]) << 3
91 h4 := load3(src[13:]) << 2
92 h5 := load4(src[16:])
93 h6 := load3(src[20:]) << 7
94 h7 := load3(src[23:]) << 5
95 h8 := load3(src[26:]) << 4
96 h9 := load3(src[29:]) << 2
97
98 var carry [10]int64
99 carry[9] = (h9 + 1<<24) >> 25
100 h0 += carry[9] * 19
101 h9 -= carry[9] << 25
102 carry[1] = (h1 + 1<<24) >> 25
103 h2 += carry[1]
104 h1 -= carry[1] << 25
105 carry[3] = (h3 + 1<<24) >> 25
106 h4 += carry[3]
107 h3 -= carry[3] << 25
108 carry[5] = (h5 + 1<<24) >> 25
109 h6 += carry[5]
110 h5 -= carry[5] << 25
111 carry[7] = (h7 + 1<<24) >> 25
112 h8 += carry[7]
113 h7 -= carry[7] << 25
114
115 carry[0] = (h0 + 1<<25) >> 26
116 h1 += carry[0]
117 h0 -= carry[0] << 26
118 carry[2] = (h2 + 1<<25) >> 26
119 h3 += carry[2]
120 h2 -= carry[2] << 26
121 carry[4] = (h4 + 1<<25) >> 26
122 h5 += carry[4]
123 h4 -= carry[4] << 26
124 carry[6] = (h6 + 1<<25) >> 26
125 h7 += carry[6]
126 h6 -= carry[6] << 26
127 carry[8] = (h8 + 1<<25) >> 26
128 h9 += carry[8]
129 h8 -= carry[8] << 26
130
131 dst[0] = int32(h0)
132 dst[1] = int32(h1)
133 dst[2] = int32(h2)
134 dst[3] = int32(h3)
135 dst[4] = int32(h4)
136 dst[5] = int32(h5)
137 dst[6] = int32(h6)
138 dst[7] = int32(h7)
139 dst[8] = int32(h8)
140 dst[9] = int32(h9)
141}
142
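// Worked note on feFromBytes above (illustrative, not part of the
// upstream file): the limbs use radix 2^25.5, so limb i carries weight
// 2^ceil(25.5*i). Each shift re-aligns a raw byte offset to that weight:
// bytes 4..6 sit at bit 32 = 2^6 * 2^26, hence load3(src[4:]) << 6 for
// h1 (weight 2^26); bytes 7..9 sit at bit 56 = 2^5 * 2^51, hence
// load3(src[7:]) << 5 for h2 (weight 2^51).
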
143// feToBytes marshals h to s.
144// Preconditions:
145// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
146//
147// Write p=2^255-19; q=floor(h/p).
148// Basic claim: q = floor(2^(-255)(h + 19 2^(-25)h9 + 2^(-1))).
149//
150// Proof:
151// Have |h|<=p so |q|<=1 so |19^2 2^(-255) q|<1/4.
152// Also have |h-2^230 h9|<2^230 so |19 2^(-255)(h-2^230 h9)|<1/4.
153//
154// Write y=2^(-1)-19^2 2^(-255)q-19 2^(-255)(h-2^230 h9).
155// Then 0<y<1.
156//
157// Write r=h-pq.
158// Have 0<=r<=p-1=2^255-20.
159// Thus 0<=r+19(2^-255)r<r+19(2^-255)2^255<=2^255-1.
160//
161// Write x=r+19(2^-255)r+y.
162// Then 0<x<2^255 so floor(2^(-255)x) = 0 so floor(q+2^(-255)x) = q.
163//
164// Have q+2^(-255)x = 2^(-255)(h + 19 2^(-25) h9 + 2^(-1))
165// so floor(2^(-255)(h + 19 2^(-25) h9 + 2^(-1))) = q.
166func feToBytes(s *[32]byte, h *fieldElement) {
167 var carry [10]int32
168
169 q := (19*h[9] + (1 << 24)) >> 25
170 q = (h[0] + q) >> 26
171 q = (h[1] + q) >> 25
172 q = (h[2] + q) >> 26
173 q = (h[3] + q) >> 25
174 q = (h[4] + q) >> 26
175 q = (h[5] + q) >> 25
176 q = (h[6] + q) >> 26
177 q = (h[7] + q) >> 25
178 q = (h[8] + q) >> 26
179 q = (h[9] + q) >> 25
180
181 // Goal: Output h-(2^255-19)q, which is between 0 and 2^255-20.
182 h[0] += 19 * q
183 // Goal: Output h-2^255 q, which is between 0 and 2^255-20.
184
185 carry[0] = h[0] >> 26
186 h[1] += carry[0]
187 h[0] -= carry[0] << 26
188 carry[1] = h[1] >> 25
189 h[2] += carry[1]
190 h[1] -= carry[1] << 25
191 carry[2] = h[2] >> 26
192 h[3] += carry[2]
193 h[2] -= carry[2] << 26
194 carry[3] = h[3] >> 25
195 h[4] += carry[3]
196 h[3] -= carry[3] << 25
197 carry[4] = h[4] >> 26
198 h[5] += carry[4]
199 h[4] -= carry[4] << 26
200 carry[5] = h[5] >> 25
201 h[6] += carry[5]
202 h[5] -= carry[5] << 25
203 carry[6] = h[6] >> 26
204 h[7] += carry[6]
205 h[6] -= carry[6] << 26
206 carry[7] = h[7] >> 25
207 h[8] += carry[7]
208 h[7] -= carry[7] << 25
209 carry[8] = h[8] >> 26
210 h[9] += carry[8]
211 h[8] -= carry[8] << 26
212 carry[9] = h[9] >> 25
213 h[9] -= carry[9] << 25
214 // h10 = carry9
215
216 // Goal: Output h[0]+...+2^255 h10-2^255 q, which is between 0 and 2^255-20.
217 // Have h[0]+...+2^230 h[9] between 0 and 2^255-1;
218 // evidently 2^255 h10-2^255 q = 0.
219 // Goal: Output h[0]+...+2^230 h[9].
220
221 s[0] = byte(h[0] >> 0)
222 s[1] = byte(h[0] >> 8)
223 s[2] = byte(h[0] >> 16)
224 s[3] = byte((h[0] >> 24) | (h[1] << 2))
225 s[4] = byte(h[1] >> 6)
226 s[5] = byte(h[1] >> 14)
227 s[6] = byte((h[1] >> 22) | (h[2] << 3))
228 s[7] = byte(h[2] >> 5)
229 s[8] = byte(h[2] >> 13)
230 s[9] = byte((h[2] >> 21) | (h[3] << 5))
231 s[10] = byte(h[3] >> 3)
232 s[11] = byte(h[3] >> 11)
233 s[12] = byte((h[3] >> 19) | (h[4] << 6))
234 s[13] = byte(h[4] >> 2)
235 s[14] = byte(h[4] >> 10)
236 s[15] = byte(h[4] >> 18)
237 s[16] = byte(h[5] >> 0)
238 s[17] = byte(h[5] >> 8)
239 s[18] = byte(h[5] >> 16)
240 s[19] = byte((h[5] >> 24) | (h[6] << 1))
241 s[20] = byte(h[6] >> 7)
242 s[21] = byte(h[6] >> 15)
243 s[22] = byte((h[6] >> 23) | (h[7] << 3))
244 s[23] = byte(h[7] >> 5)
245 s[24] = byte(h[7] >> 13)
246 s[25] = byte((h[7] >> 21) | (h[8] << 4))
247 s[26] = byte(h[8] >> 4)
248 s[27] = byte(h[8] >> 12)
249 s[28] = byte((h[8] >> 20) | (h[9] << 6))
250 s[29] = byte(h[9] >> 2)
251 s[30] = byte(h[9] >> 10)
252 s[31] = byte(h[9] >> 18)
253}
254
255// feMul calculates h = f * g
256// Can overlap h with f or g.
257//
258// Preconditions:
259// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
260// |g| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
261//
262// Postconditions:
263// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
264//
265// Notes on implementation strategy:
266//
267// Using schoolbook multiplication.
268// Karatsuba would save a little in some cost models.
269//
270// Most multiplications by 2 and 19 are 32-bit precomputations;
271// cheaper than 64-bit postcomputations.
272//
273// There is one remaining multiplication by 19 in the carry chain;
274// one *19 precomputation can be merged into this,
275// but the resulting data flow is considerably less clean.
276//
277// There are 12 carries below.
278// 10 of them are 2-way parallelizable and vectorizable.
279// Can get away with 11 carries, but then data flow is much deeper.
280//
281// With tighter constraints on inputs can squeeze carries into int32.
282func feMul(h, f, g *fieldElement) {
283 f0 := f[0]
284 f1 := f[1]
285 f2 := f[2]
286 f3 := f[3]
287 f4 := f[4]
288 f5 := f[5]
289 f6 := f[6]
290 f7 := f[7]
291 f8 := f[8]
292 f9 := f[9]
293 g0 := g[0]
294 g1 := g[1]
295 g2 := g[2]
296 g3 := g[3]
297 g4 := g[4]
298 g5 := g[5]
299 g6 := g[6]
300 g7 := g[7]
301 g8 := g[8]
302 g9 := g[9]
303 g1_19 := 19 * g1 // 1.4*2^29
304 g2_19 := 19 * g2 // 1.4*2^30; still ok
305 g3_19 := 19 * g3
306 g4_19 := 19 * g4
307 g5_19 := 19 * g5
308 g6_19 := 19 * g6
309 g7_19 := 19 * g7
310 g8_19 := 19 * g8
311 g9_19 := 19 * g9
312 f1_2 := 2 * f1
313 f3_2 := 2 * f3
314 f5_2 := 2 * f5
315 f7_2 := 2 * f7
316 f9_2 := 2 * f9
317 f0g0 := int64(f0) * int64(g0)
318 f0g1 := int64(f0) * int64(g1)
319 f0g2 := int64(f0) * int64(g2)
320 f0g3 := int64(f0) * int64(g3)
321 f0g4 := int64(f0) * int64(g4)
322 f0g5 := int64(f0) * int64(g5)
323 f0g6 := int64(f0) * int64(g6)
324 f0g7 := int64(f0) * int64(g7)
325 f0g8 := int64(f0) * int64(g8)
326 f0g9 := int64(f0) * int64(g9)
327 f1g0 := int64(f1) * int64(g0)
328 f1g1_2 := int64(f1_2) * int64(g1)
329 f1g2 := int64(f1) * int64(g2)
330 f1g3_2 := int64(f1_2) * int64(g3)
331 f1g4 := int64(f1) * int64(g4)
332 f1g5_2 := int64(f1_2) * int64(g5)
333 f1g6 := int64(f1) * int64(g6)
334 f1g7_2 := int64(f1_2) * int64(g7)
335 f1g8 := int64(f1) * int64(g8)
336 f1g9_38 := int64(f1_2) * int64(g9_19)
337 f2g0 := int64(f2) * int64(g0)
338 f2g1 := int64(f2) * int64(g1)
339 f2g2 := int64(f2) * int64(g2)
340 f2g3 := int64(f2) * int64(g3)
341 f2g4 := int64(f2) * int64(g4)
342 f2g5 := int64(f2) * int64(g5)
343 f2g6 := int64(f2) * int64(g6)
344 f2g7 := int64(f2) * int64(g7)
345 f2g8_19 := int64(f2) * int64(g8_19)
346 f2g9_19 := int64(f2) * int64(g9_19)
347 f3g0 := int64(f3) * int64(g0)
348 f3g1_2 := int64(f3_2) * int64(g1)
349 f3g2 := int64(f3) * int64(g2)
350 f3g3_2 := int64(f3_2) * int64(g3)
351 f3g4 := int64(f3) * int64(g4)
352 f3g5_2 := int64(f3_2) * int64(g5)
353 f3g6 := int64(f3) * int64(g6)
354 f3g7_38 := int64(f3_2) * int64(g7_19)
355 f3g8_19 := int64(f3) * int64(g8_19)
356 f3g9_38 := int64(f3_2) * int64(g9_19)
357 f4g0 := int64(f4) * int64(g0)
358 f4g1 := int64(f4) * int64(g1)
359 f4g2 := int64(f4) * int64(g2)
360 f4g3 := int64(f4) * int64(g3)
361 f4g4 := int64(f4) * int64(g4)
362 f4g5 := int64(f4) * int64(g5)
363 f4g6_19 := int64(f4) * int64(g6_19)
364 f4g7_19 := int64(f4) * int64(g7_19)
365 f4g8_19 := int64(f4) * int64(g8_19)
366 f4g9_19 := int64(f4) * int64(g9_19)
367 f5g0 := int64(f5) * int64(g0)
368 f5g1_2 := int64(f5_2) * int64(g1)
369 f5g2 := int64(f5) * int64(g2)
370 f5g3_2 := int64(f5_2) * int64(g3)
371 f5g4 := int64(f5) * int64(g4)
372 f5g5_38 := int64(f5_2) * int64(g5_19)
373 f5g6_19 := int64(f5) * int64(g6_19)
374 f5g7_38 := int64(f5_2) * int64(g7_19)
375 f5g8_19 := int64(f5) * int64(g8_19)
376 f5g9_38 := int64(f5_2) * int64(g9_19)
377 f6g0 := int64(f6) * int64(g0)
378 f6g1 := int64(f6) * int64(g1)
379 f6g2 := int64(f6) * int64(g2)
380 f6g3 := int64(f6) * int64(g3)
381 f6g4_19 := int64(f6) * int64(g4_19)
382 f6g5_19 := int64(f6) * int64(g5_19)
383 f6g6_19 := int64(f6) * int64(g6_19)
384 f6g7_19 := int64(f6) * int64(g7_19)
385 f6g8_19 := int64(f6) * int64(g8_19)
386 f6g9_19 := int64(f6) * int64(g9_19)
387 f7g0 := int64(f7) * int64(g0)
388 f7g1_2 := int64(f7_2) * int64(g1)
389 f7g2 := int64(f7) * int64(g2)
390 f7g3_38 := int64(f7_2) * int64(g3_19)
391 f7g4_19 := int64(f7) * int64(g4_19)
392 f7g5_38 := int64(f7_2) * int64(g5_19)
393 f7g6_19 := int64(f7) * int64(g6_19)
394 f7g7_38 := int64(f7_2) * int64(g7_19)
395 f7g8_19 := int64(f7) * int64(g8_19)
396 f7g9_38 := int64(f7_2) * int64(g9_19)
397 f8g0 := int64(f8) * int64(g0)
398 f8g1 := int64(f8) * int64(g1)
399 f8g2_19 := int64(f8) * int64(g2_19)
400 f8g3_19 := int64(f8) * int64(g3_19)
401 f8g4_19 := int64(f8) * int64(g4_19)
402 f8g5_19 := int64(f8) * int64(g5_19)
403 f8g6_19 := int64(f8) * int64(g6_19)
404 f8g7_19 := int64(f8) * int64(g7_19)
405 f8g8_19 := int64(f8) * int64(g8_19)
406 f8g9_19 := int64(f8) * int64(g9_19)
407 f9g0 := int64(f9) * int64(g0)
408 f9g1_38 := int64(f9_2) * int64(g1_19)
409 f9g2_19 := int64(f9) * int64(g2_19)
410 f9g3_38 := int64(f9_2) * int64(g3_19)
411 f9g4_19 := int64(f9) * int64(g4_19)
412 f9g5_38 := int64(f9_2) * int64(g5_19)
413 f9g6_19 := int64(f9) * int64(g6_19)
414 f9g7_38 := int64(f9_2) * int64(g7_19)
415 f9g8_19 := int64(f9) * int64(g8_19)
416 f9g9_38 := int64(f9_2) * int64(g9_19)
417 h0 := f0g0 + f1g9_38 + f2g8_19 + f3g7_38 + f4g6_19 + f5g5_38 + f6g4_19 + f7g3_38 + f8g2_19 + f9g1_38
418 h1 := f0g1 + f1g0 + f2g9_19 + f3g8_19 + f4g7_19 + f5g6_19 + f6g5_19 + f7g4_19 + f8g3_19 + f9g2_19
419 h2 := f0g2 + f1g1_2 + f2g0 + f3g9_38 + f4g8_19 + f5g7_38 + f6g6_19 + f7g5_38 + f8g4_19 + f9g3_38
420 h3 := f0g3 + f1g2 + f2g1 + f3g0 + f4g9_19 + f5g8_19 + f6g7_19 + f7g6_19 + f8g5_19 + f9g4_19
421 h4 := f0g4 + f1g3_2 + f2g2 + f3g1_2 + f4g0 + f5g9_38 + f6g8_19 + f7g7_38 + f8g6_19 + f9g5_38
422 h5 := f0g5 + f1g4 + f2g3 + f3g2 + f4g1 + f5g0 + f6g9_19 + f7g8_19 + f8g7_19 + f9g6_19
423 h6 := f0g6 + f1g5_2 + f2g4 + f3g3_2 + f4g2 + f5g1_2 + f6g0 + f7g9_38 + f8g8_19 + f9g7_38
424 h7 := f0g7 + f1g6 + f2g5 + f3g4 + f4g3 + f5g2 + f6g1 + f7g0 + f8g9_19 + f9g8_19
425 h8 := f0g8 + f1g7_2 + f2g6 + f3g5_2 + f4g4 + f5g3_2 + f6g2 + f7g1_2 + f8g0 + f9g9_38
426 h9 := f0g9 + f1g8 + f2g7 + f3g6 + f4g5 + f5g4 + f6g3 + f7g2 + f8g1 + f9g0
427 var carry [10]int64
428
429 // |h0| <= (1.1*1.1*2^52*(1+19+19+19+19)+1.1*1.1*2^50*(38+38+38+38+38))
430 // i.e. |h0| <= 1.2*2^59; narrower ranges for h2, h4, h6, h8
431 // |h1| <= (1.1*1.1*2^51*(1+1+19+19+19+19+19+19+19+19))
432 // i.e. |h1| <= 1.5*2^58; narrower ranges for h3, h5, h7, h9
433
434 carry[0] = (h0 + (1 << 25)) >> 26
435 h1 += carry[0]
436 h0 -= carry[0] << 26
437 carry[4] = (h4 + (1 << 25)) >> 26
438 h5 += carry[4]
439 h4 -= carry[4] << 26
440 // |h0| <= 2^25
441 // |h4| <= 2^25
442 // |h1| <= 1.51*2^58
443 // |h5| <= 1.51*2^58
444
445 carry[1] = (h1 + (1 << 24)) >> 25
446 h2 += carry[1]
447 h1 -= carry[1] << 25
448 carry[5] = (h5 + (1 << 24)) >> 25
449 h6 += carry[5]
450 h5 -= carry[5] << 25
451 // |h1| <= 2^24; from now on fits into int32
452 // |h5| <= 2^24; from now on fits into int32
453 // |h2| <= 1.21*2^59
454 // |h6| <= 1.21*2^59
455
456 carry[2] = (h2 + (1 << 25)) >> 26
457 h3 += carry[2]
458 h2 -= carry[2] << 26
459 carry[6] = (h6 + (1 << 25)) >> 26
460 h7 += carry[6]
461 h6 -= carry[6] << 26
462 // |h2| <= 2^25; from now on fits into int32 unchanged
463 // |h6| <= 2^25; from now on fits into int32 unchanged
464 // |h3| <= 1.51*2^58
465 // |h7| <= 1.51*2^58
466
467 carry[3] = (h3 + (1 << 24)) >> 25
468 h4 += carry[3]
469 h3 -= carry[3] << 25
470 carry[7] = (h7 + (1 << 24)) >> 25
471 h8 += carry[7]
472 h7 -= carry[7] << 25
473 // |h3| <= 2^24; from now on fits into int32 unchanged
474 // |h7| <= 2^24; from now on fits into int32 unchanged
475 // |h4| <= 1.52*2^33
476 // |h8| <= 1.52*2^33
477
478 carry[4] = (h4 + (1 << 25)) >> 26
479 h5 += carry[4]
480 h4 -= carry[4] << 26
481 carry[8] = (h8 + (1 << 25)) >> 26
482 h9 += carry[8]
483 h8 -= carry[8] << 26
484 // |h4| <= 2^25; from now on fits into int32 unchanged
485 // |h8| <= 2^25; from now on fits into int32 unchanged
486 // |h5| <= 1.01*2^24
487 // |h9| <= 1.51*2^58
488
489 carry[9] = (h9 + (1 << 24)) >> 25
490 h0 += carry[9] * 19
491 h9 -= carry[9] << 25
492 // |h9| <= 2^24; from now on fits into int32 unchanged
493 // |h0| <= 1.8*2^37
494
495 carry[0] = (h0 + (1 << 25)) >> 26
496 h1 += carry[0]
497 h0 -= carry[0] << 26
498 // |h0| <= 2^25; from now on fits into int32 unchanged
499 // |h1| <= 1.01*2^24
500
501 h[0] = int32(h0)
502 h[1] = int32(h1)
503 h[2] = int32(h2)
504 h[3] = int32(h3)
505 h[4] = int32(h4)
506 h[5] = int32(h5)
507 h[6] = int32(h6)
508 h[7] = int32(h7)
509 h[8] = int32(h8)
510 h[9] = int32(h9)
511}
512
513// feSquare calculates h = f*f. Can overlap h with f.
514//
515// Preconditions:
516// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
517//
518// Postconditions:
519// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
520func feSquare(h, f *fieldElement) {
521 f0 := f[0]
522 f1 := f[1]
523 f2 := f[2]
524 f3 := f[3]
525 f4 := f[4]
526 f5 := f[5]
527 f6 := f[6]
528 f7 := f[7]
529 f8 := f[8]
530 f9 := f[9]
531 f0_2 := 2 * f0
532 f1_2 := 2 * f1
533 f2_2 := 2 * f2
534 f3_2 := 2 * f3
535 f4_2 := 2 * f4
536 f5_2 := 2 * f5
537 f6_2 := 2 * f6
538 f7_2 := 2 * f7
539 f5_38 := 38 * f5 // 1.31*2^30
540 f6_19 := 19 * f6 // 1.31*2^30
541 f7_38 := 38 * f7 // 1.31*2^30
542 f8_19 := 19 * f8 // 1.31*2^30
543 f9_38 := 38 * f9 // 1.31*2^30
544 f0f0 := int64(f0) * int64(f0)
545 f0f1_2 := int64(f0_2) * int64(f1)
546 f0f2_2 := int64(f0_2) * int64(f2)
547 f0f3_2 := int64(f0_2) * int64(f3)
548 f0f4_2 := int64(f0_2) * int64(f4)
549 f0f5_2 := int64(f0_2) * int64(f5)
550 f0f6_2 := int64(f0_2) * int64(f6)
551 f0f7_2 := int64(f0_2) * int64(f7)
552 f0f8_2 := int64(f0_2) * int64(f8)
553 f0f9_2 := int64(f0_2) * int64(f9)
554 f1f1_2 := int64(f1_2) * int64(f1)
555 f1f2_2 := int64(f1_2) * int64(f2)
556 f1f3_4 := int64(f1_2) * int64(f3_2)
557 f1f4_2 := int64(f1_2) * int64(f4)
558 f1f5_4 := int64(f1_2) * int64(f5_2)
559 f1f6_2 := int64(f1_2) * int64(f6)
560 f1f7_4 := int64(f1_2) * int64(f7_2)
561 f1f8_2 := int64(f1_2) * int64(f8)
562 f1f9_76 := int64(f1_2) * int64(f9_38)
563 f2f2 := int64(f2) * int64(f2)
564 f2f3_2 := int64(f2_2) * int64(f3)
565 f2f4_2 := int64(f2_2) * int64(f4)
566 f2f5_2 := int64(f2_2) * int64(f5)
567 f2f6_2 := int64(f2_2) * int64(f6)
568 f2f7_2 := int64(f2_2) * int64(f7)
569 f2f8_38 := int64(f2_2) * int64(f8_19)
570 f2f9_38 := int64(f2) * int64(f9_38)
571 f3f3_2 := int64(f3_2) * int64(f3)
572 f3f4_2 := int64(f3_2) * int64(f4)
573 f3f5_4 := int64(f3_2) * int64(f5_2)
574 f3f6_2 := int64(f3_2) * int64(f6)
575 f3f7_76 := int64(f3_2) * int64(f7_38)
576 f3f8_38 := int64(f3_2) * int64(f8_19)
577 f3f9_76 := int64(f3_2) * int64(f9_38)
578 f4f4 := int64(f4) * int64(f4)
579 f4f5_2 := int64(f4_2) * int64(f5)
580 f4f6_38 := int64(f4_2) * int64(f6_19)
581 f4f7_38 := int64(f4) * int64(f7_38)
582 f4f8_38 := int64(f4_2) * int64(f8_19)
583 f4f9_38 := int64(f4) * int64(f9_38)
584 f5f5_38 := int64(f5) * int64(f5_38)
585 f5f6_38 := int64(f5_2) * int64(f6_19)
586 f5f7_76 := int64(f5_2) * int64(f7_38)
587 f5f8_38 := int64(f5_2) * int64(f8_19)
588 f5f9_76 := int64(f5_2) * int64(f9_38)
589 f6f6_19 := int64(f6) * int64(f6_19)
590 f6f7_38 := int64(f6) * int64(f7_38)
591 f6f8_38 := int64(f6_2) * int64(f8_19)
592 f6f9_38 := int64(f6) * int64(f9_38)
593 f7f7_38 := int64(f7) * int64(f7_38)
594 f7f8_38 := int64(f7_2) * int64(f8_19)
595 f7f9_76 := int64(f7_2) * int64(f9_38)
596 f8f8_19 := int64(f8) * int64(f8_19)
597 f8f9_38 := int64(f8) * int64(f9_38)
598 f9f9_38 := int64(f9) * int64(f9_38)
599 h0 := f0f0 + f1f9_76 + f2f8_38 + f3f7_76 + f4f6_38 + f5f5_38
600 h1 := f0f1_2 + f2f9_38 + f3f8_38 + f4f7_38 + f5f6_38
601 h2 := f0f2_2 + f1f1_2 + f3f9_76 + f4f8_38 + f5f7_76 + f6f6_19
602 h3 := f0f3_2 + f1f2_2 + f4f9_38 + f5f8_38 + f6f7_38
603 h4 := f0f4_2 + f1f3_4 + f2f2 + f5f9_76 + f6f8_38 + f7f7_38
604 h5 := f0f5_2 + f1f4_2 + f2f3_2 + f6f9_38 + f7f8_38
605 h6 := f0f6_2 + f1f5_4 + f2f4_2 + f3f3_2 + f7f9_76 + f8f8_19
606 h7 := f0f7_2 + f1f6_2 + f2f5_2 + f3f4_2 + f8f9_38
607 h8 := f0f8_2 + f1f7_4 + f2f6_2 + f3f5_4 + f4f4 + f9f9_38
608 h9 := f0f9_2 + f1f8_2 + f2f7_2 + f3f6_2 + f4f5_2
609 var carry [10]int64
610
611 carry[0] = (h0 + (1 << 25)) >> 26
612 h1 += carry[0]
613 h0 -= carry[0] << 26
614 carry[4] = (h4 + (1 << 25)) >> 26
615 h5 += carry[4]
616 h4 -= carry[4] << 26
617
618 carry[1] = (h1 + (1 << 24)) >> 25
619 h2 += carry[1]
620 h1 -= carry[1] << 25
621 carry[5] = (h5 + (1 << 24)) >> 25
622 h6 += carry[5]
623 h5 -= carry[5] << 25
624
625 carry[2] = (h2 + (1 << 25)) >> 26
626 h3 += carry[2]
627 h2 -= carry[2] << 26
628 carry[6] = (h6 + (1 << 25)) >> 26
629 h7 += carry[6]
630 h6 -= carry[6] << 26
631
632 carry[3] = (h3 + (1 << 24)) >> 25
633 h4 += carry[3]
634 h3 -= carry[3] << 25
635 carry[7] = (h7 + (1 << 24)) >> 25
636 h8 += carry[7]
637 h7 -= carry[7] << 25
638
639 carry[4] = (h4 + (1 << 25)) >> 26
640 h5 += carry[4]
641 h4 -= carry[4] << 26
642 carry[8] = (h8 + (1 << 25)) >> 26
643 h9 += carry[8]
644 h8 -= carry[8] << 26
645
646 carry[9] = (h9 + (1 << 24)) >> 25
647 h0 += carry[9] * 19
648 h9 -= carry[9] << 25
649
650 carry[0] = (h0 + (1 << 25)) >> 26
651 h1 += carry[0]
652 h0 -= carry[0] << 26
653
654 h[0] = int32(h0)
655 h[1] = int32(h1)
656 h[2] = int32(h2)
657 h[3] = int32(h3)
658 h[4] = int32(h4)
659 h[5] = int32(h5)
660 h[6] = int32(h6)
661 h[7] = int32(h7)
662 h[8] = int32(h8)
663 h[9] = int32(h9)
664}
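
The interleaved carry chains above use a rounding shift: (h + 2^25) >> 26 for the 26-bit limbs and (h + 2^24) >> 25 for the 25-bit ones, so each limb ends up centered around zero rather than strictly non-negative. A minimal stand-alone illustration of that rounding shift (the helper name carry26 is ours, not part of the package):

    package main

    import "fmt"

    // carry26 extracts the signed carry from a 26-bit limb with the
    // rounding shift used by feSquare, leaving the limb in roughly
    // [-2^25, 2^25].
    func carry26(h int64) (limb, carry int64) {
    	carry = (h + (1 << 25)) >> 26
    	limb = h - carry<<26
    	return
    }

    func main() {
    	fmt.Println(carry26(1 << 26))  // 0 1
    	fmt.Println(carry26(-1 << 26)) // 0 -1
    	fmt.Println(carry26(100))      // 100 0
    }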
665
666// feMul121666 calculates h = f * 121666. Can overlap h with f.
667//
668// Preconditions:
669// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
670//
671// Postconditions:
672// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
673func feMul121666(h, f *fieldElement) {
674 h0 := int64(f[0]) * 121666
675 h1 := int64(f[1]) * 121666
676 h2 := int64(f[2]) * 121666
677 h3 := int64(f[3]) * 121666
678 h4 := int64(f[4]) * 121666
679 h5 := int64(f[5]) * 121666
680 h6 := int64(f[6]) * 121666
681 h7 := int64(f[7]) * 121666
682 h8 := int64(f[8]) * 121666
683 h9 := int64(f[9]) * 121666
684 var carry [10]int64
685
686 carry[9] = (h9 + (1 << 24)) >> 25
687 h0 += carry[9] * 19
688 h9 -= carry[9] << 25
689 carry[1] = (h1 + (1 << 24)) >> 25
690 h2 += carry[1]
691 h1 -= carry[1] << 25
692 carry[3] = (h3 + (1 << 24)) >> 25
693 h4 += carry[3]
694 h3 -= carry[3] << 25
695 carry[5] = (h5 + (1 << 24)) >> 25
696 h6 += carry[5]
697 h5 -= carry[5] << 25
698 carry[7] = (h7 + (1 << 24)) >> 25
699 h8 += carry[7]
700 h7 -= carry[7] << 25
701
702 carry[0] = (h0 + (1 << 25)) >> 26
703 h1 += carry[0]
704 h0 -= carry[0] << 26
705 carry[2] = (h2 + (1 << 25)) >> 26
706 h3 += carry[2]
707 h2 -= carry[2] << 26
708 carry[4] = (h4 + (1 << 25)) >> 26
709 h5 += carry[4]
710 h4 -= carry[4] << 26
711 carry[6] = (h6 + (1 << 25)) >> 26
712 h7 += carry[6]
713 h6 -= carry[6] << 26
714 carry[8] = (h8 + (1 << 25)) >> 26
715 h9 += carry[8]
716 h8 -= carry[8] << 26
717
718 h[0] = int32(h0)
719 h[1] = int32(h1)
720 h[2] = int32(h2)
721 h[3] = int32(h3)
722 h[4] = int32(h4)
723 h[5] = int32(h5)
724 h[6] = int32(h6)
725 h[7] = int32(h7)
726 h[8] = int32(h8)
727 h[9] = int32(h9)
728}
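
The multiplier 121666 is (A+2)/4 for the Montgomery coefficient A = 486662 of curve25519 (the same A that appears as a field-element constant in the edwards25519 tables below); it is the constant the Montgomery ladder needs for its z-coordinate update. A one-line sanity check:

    package main

    import "fmt"

    func main() {
    	const A = 486662 // Montgomery-form coefficient of curve25519
    	fmt.Println((A + 2) / 4) // 121666
    }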
729
730// feInvert sets out = z^-1.
731func feInvert(out, z *fieldElement) {
732 var t0, t1, t2, t3 fieldElement
733 var i int
734
735 feSquare(&t0, z)
736 for i = 1; i < 1; i++ {
737 feSquare(&t0, &t0)
738 }
739 feSquare(&t1, &t0)
740 for i = 1; i < 2; i++ {
741 feSquare(&t1, &t1)
742 }
743 feMul(&t1, z, &t1)
744 feMul(&t0, &t0, &t1)
745 feSquare(&t2, &t0)
746 for i = 1; i < 1; i++ {
747 feSquare(&t2, &t2)
748 }
749 feMul(&t1, &t1, &t2)
750 feSquare(&t2, &t1)
751 for i = 1; i < 5; i++ {
752 feSquare(&t2, &t2)
753 }
754 feMul(&t1, &t2, &t1)
755 feSquare(&t2, &t1)
756 for i = 1; i < 10; i++ {
757 feSquare(&t2, &t2)
758 }
759 feMul(&t2, &t2, &t1)
760 feSquare(&t3, &t2)
761 for i = 1; i < 20; i++ {
762 feSquare(&t3, &t3)
763 }
764 feMul(&t2, &t3, &t2)
765 feSquare(&t2, &t2)
766 for i = 1; i < 10; i++ {
767 feSquare(&t2, &t2)
768 }
769 feMul(&t1, &t2, &t1)
770 feSquare(&t2, &t1)
771 for i = 1; i < 50; i++ {
772 feSquare(&t2, &t2)
773 }
774 feMul(&t2, &t2, &t1)
775 feSquare(&t3, &t2)
776 for i = 1; i < 100; i++ {
777 feSquare(&t3, &t3)
778 }
779 feMul(&t2, &t3, &t2)
780 feSquare(&t2, &t2)
781 for i = 1; i < 50; i++ {
782 feSquare(&t2, &t2)
783 }
784 feMul(&t1, &t2, &t1)
785 feSquare(&t1, &t1)
786 for i = 1; i < 5; i++ {
787 feSquare(&t1, &t1)
788 }
789 feMul(out, &t1, &t0)
790}
791
792func scalarMult(out, in, base *[32]byte) {
793 var e [32]byte
794
795 copy(e[:], in[:])
796 e[0] &= 248
797 e[31] &= 127
798 e[31] |= 64
799
800 var x1, x2, z2, x3, z3, tmp0, tmp1 fieldElement
801 feFromBytes(&x1, base)
802 feOne(&x2)
803 feCopy(&x3, &x1)
804 feOne(&z3)
805
806 swap := int32(0)
807 for pos := 254; pos >= 0; pos-- {
808 b := e[pos/8] >> uint(pos&7)
809 b &= 1
810 swap ^= int32(b)
811 feCSwap(&x2, &x3, swap)
812 feCSwap(&z2, &z3, swap)
813 swap = int32(b)
814
815 feSub(&tmp0, &x3, &z3)
816 feSub(&tmp1, &x2, &z2)
817 feAdd(&x2, &x2, &z2)
818 feAdd(&z2, &x3, &z3)
819 feMul(&z3, &tmp0, &x2)
820 feMul(&z2, &z2, &tmp1)
821 feSquare(&tmp0, &tmp1)
822 feSquare(&tmp1, &x2)
823 feAdd(&x3, &z3, &z2)
824 feSub(&z2, &z3, &z2)
825 feMul(&x2, &tmp1, &tmp0)
826 feSub(&tmp1, &tmp1, &tmp0)
827 feSquare(&z2, &z2)
828 feMul121666(&z3, &tmp1)
829 feSquare(&x3, &x3)
830 feAdd(&tmp0, &tmp0, &z3)
831 feMul(&z3, &x1, &z2)
832 feMul(&z2, &tmp1, &tmp0)
833 }
834
835 feCSwap(&x2, &x3, swap)
836 feCSwap(&z2, &z3, swap)
837
838 feInvert(&z2, &z2)
839 feMul(&x2, &x2, &z2)
840 feToBytes(out, &x2)
841}
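
feInvert above computes z^(2^255 - 21) = z^(p-2) for p = 2^255 - 19 with a fixed addition chain, which equals z^-1 by Fermat's little theorem. The chain can be cross-checked against a direct big-integer exponentiation; a sketch under that framing, not part of the package:

    package main

    import (
    	"fmt"
    	"math/big"
    )

    func main() {
    	// p = 2^255 - 19, the field prime used throughout these files.
    	p := new(big.Int).Lsh(big.NewInt(1), 255)
    	p.Sub(p, big.NewInt(19))

    	z := big.NewInt(123456789) // any nonzero field element

    	// Fermat inversion: z^(p-2) mod p, the exponent 2^255 - 21
    	// that feInvert's addition chain evaluates.
    	inv := new(big.Int).Exp(z, new(big.Int).Sub(p, big.NewInt(2)), p)

    	// z * z^-1 == 1 (mod p).
    	check := new(big.Int).Mul(z, inv)
    	fmt.Println(check.Mod(check, p)) // 1
    }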
diff --git a/vendor/golang.org/x/crypto/curve25519/doc.go b/vendor/golang.org/x/crypto/curve25519/doc.go
new file mode 100644
index 0000000..ebeea3c
--- /dev/null
+++ b/vendor/golang.org/x/crypto/curve25519/doc.go
@@ -0,0 +1,23 @@
1// Copyright 2012 The Go Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5// Package curve25519 provides an implementation of scalar multiplication on
6// the elliptic curve known as curve25519. See http://cr.yp.to/ecdh.html
7package curve25519 // import "golang.org/x/crypto/curve25519"
8
9// basePoint is the x coordinate of the generator of the curve.
10var basePoint = [32]byte{9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
11
12// ScalarMult sets dst to the product in*base where dst and base are the x
13// coordinates of group points and all values are in little-endian form.
14func ScalarMult(dst, in, base *[32]byte) {
15 scalarMult(dst, in, base)
16}
17
18// ScalarBaseMult sets dst to the product in*base where dst and base are the x
19// coordinates of group points, base is the standard generator and all values
20// are in little-endian form.
21func ScalarBaseMult(dst, in *[32]byte) {
22 ScalarMult(dst, in, &basePoint)
23}
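
These two functions are all a curve25519 Diffie-Hellman exchange needs. A hedged usage sketch (key generation via crypto/rand and the variable names are our own choices, not prescribed by the package; error handling is elided):

    package main

    import (
    	"crypto/rand"
    	"fmt"

    	"golang.org/x/crypto/curve25519"
    )

    func main() {
    	// Each side draws a random 32-byte scalar; scalarMult clamps it
    	// internally, so raw random bytes are acceptable input.
    	var alicePriv, bobPriv [32]byte
    	rand.Read(alicePriv[:])
    	rand.Read(bobPriv[:])

    	// Public values: scalar times the base point (x = 9).
    	var alicePub, bobPub [32]byte
    	curve25519.ScalarBaseMult(&alicePub, &alicePriv)
    	curve25519.ScalarBaseMult(&bobPub, &bobPriv)

    	// Shared secret: both sides arrive at the same 32 bytes.
    	var s1, s2 [32]byte
    	curve25519.ScalarMult(&s1, &alicePriv, &bobPub)
    	curve25519.ScalarMult(&s2, &bobPriv, &alicePub)
    	fmt.Println(s1 == s2) // true
    }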
diff --git a/vendor/golang.org/x/crypto/curve25519/freeze_amd64.s b/vendor/golang.org/x/crypto/curve25519/freeze_amd64.s
new file mode 100644
index 0000000..536479b
--- /dev/null
+++ b/vendor/golang.org/x/crypto/curve25519/freeze_amd64.s
@@ -0,0 +1,73 @@
1// Copyright 2012 The Go Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5// This code was translated into a form compatible with 6a from the public
6// domain sources in SUPERCOP: http://bench.cr.yp.to/supercop.html
7
8// +build amd64,!gccgo,!appengine
9
10#include "const_amd64.h"
11
12// func freeze(inout *[5]uint64)
13TEXT ·freeze(SB),7,$0-8
14 MOVQ inout+0(FP), DI
15
16 MOVQ 0(DI),SI
17 MOVQ 8(DI),DX
18 MOVQ 16(DI),CX
19 MOVQ 24(DI),R8
20 MOVQ 32(DI),R9
21 MOVQ $REDMASK51,AX
22 MOVQ AX,R10
23 SUBQ $18,R10
24 MOVQ $3,R11
25REDUCELOOP:
26 MOVQ SI,R12
27 SHRQ $51,R12
28 ANDQ AX,SI
29 ADDQ R12,DX
30 MOVQ DX,R12
31 SHRQ $51,R12
32 ANDQ AX,DX
33 ADDQ R12,CX
34 MOVQ CX,R12
35 SHRQ $51,R12
36 ANDQ AX,CX
37 ADDQ R12,R8
38 MOVQ R8,R12
39 SHRQ $51,R12
40 ANDQ AX,R8
41 ADDQ R12,R9
42 MOVQ R9,R12
43 SHRQ $51,R12
44 ANDQ AX,R9
45 IMUL3Q $19,R12,R12
46 ADDQ R12,SI
47 SUBQ $1,R11
48 JA REDUCELOOP
49 MOVQ $1,R12
50 CMPQ R10,SI
51 CMOVQLT R11,R12
52 CMPQ AX,DX
53 CMOVQNE R11,R12
54 CMPQ AX,CX
55 CMOVQNE R11,R12
56 CMPQ AX,R8
57 CMOVQNE R11,R12
58 CMPQ AX,R9
59 CMOVQNE R11,R12
60 NEGQ R12
61 ANDQ R12,AX
62 ANDQ R12,R10
63 SUBQ R10,SI
64 SUBQ AX,DX
65 SUBQ AX,CX
66 SUBQ AX,R8
67 SUBQ AX,R9
68 MOVQ SI,0(DI)
69 MOVQ DX,8(DI)
70 MOVQ CX,16(DI)
71 MOVQ R8,24(DI)
72 MOVQ R9,32(DI)
73 RET
diff --git a/vendor/golang.org/x/crypto/curve25519/ladderstep_amd64.s b/vendor/golang.org/x/crypto/curve25519/ladderstep_amd64.s
new file mode 100644
index 0000000..7074e5c
--- /dev/null
+++ b/vendor/golang.org/x/crypto/curve25519/ladderstep_amd64.s
@@ -0,0 +1,1377 @@
1// Copyright 2012 The Go Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5// This code was translated into a form compatible with 6a from the public
6// domain sources in SUPERCOP: http://bench.cr.yp.to/supercop.html
7
8// +build amd64,!gccgo,!appengine
9
10#include "const_amd64.h"
11
12// func ladderstep(inout *[5][5]uint64)
13TEXT ·ladderstep(SB),0,$296-8
14 MOVQ inout+0(FP),DI
15
16 MOVQ 40(DI),SI
17 MOVQ 48(DI),DX
18 MOVQ 56(DI),CX
19 MOVQ 64(DI),R8
20 MOVQ 72(DI),R9
21 MOVQ SI,AX
22 MOVQ DX,R10
23 MOVQ CX,R11
24 MOVQ R8,R12
25 MOVQ R9,R13
26 ADDQ ·_2P0(SB),AX
27 ADDQ ·_2P1234(SB),R10
28 ADDQ ·_2P1234(SB),R11
29 ADDQ ·_2P1234(SB),R12
30 ADDQ ·_2P1234(SB),R13
31 ADDQ 80(DI),SI
32 ADDQ 88(DI),DX
33 ADDQ 96(DI),CX
34 ADDQ 104(DI),R8
35 ADDQ 112(DI),R9
36 SUBQ 80(DI),AX
37 SUBQ 88(DI),R10
38 SUBQ 96(DI),R11
39 SUBQ 104(DI),R12
40 SUBQ 112(DI),R13
41 MOVQ SI,0(SP)
42 MOVQ DX,8(SP)
43 MOVQ CX,16(SP)
44 MOVQ R8,24(SP)
45 MOVQ R9,32(SP)
46 MOVQ AX,40(SP)
47 MOVQ R10,48(SP)
48 MOVQ R11,56(SP)
49 MOVQ R12,64(SP)
50 MOVQ R13,72(SP)
51 MOVQ 40(SP),AX
52 MULQ 40(SP)
53 MOVQ AX,SI
54 MOVQ DX,CX
55 MOVQ 40(SP),AX
56 SHLQ $1,AX
57 MULQ 48(SP)
58 MOVQ AX,R8
59 MOVQ DX,R9
60 MOVQ 40(SP),AX
61 SHLQ $1,AX
62 MULQ 56(SP)
63 MOVQ AX,R10
64 MOVQ DX,R11
65 MOVQ 40(SP),AX
66 SHLQ $1,AX
67 MULQ 64(SP)
68 MOVQ AX,R12
69 MOVQ DX,R13
70 MOVQ 40(SP),AX
71 SHLQ $1,AX
72 MULQ 72(SP)
73 MOVQ AX,R14
74 MOVQ DX,R15
75 MOVQ 48(SP),AX
76 MULQ 48(SP)
77 ADDQ AX,R10
78 ADCQ DX,R11
79 MOVQ 48(SP),AX
80 SHLQ $1,AX
81 MULQ 56(SP)
82 ADDQ AX,R12
83 ADCQ DX,R13
84 MOVQ 48(SP),AX
85 SHLQ $1,AX
86 MULQ 64(SP)
87 ADDQ AX,R14
88 ADCQ DX,R15
89 MOVQ 48(SP),DX
90 IMUL3Q $38,DX,AX
91 MULQ 72(SP)
92 ADDQ AX,SI
93 ADCQ DX,CX
94 MOVQ 56(SP),AX
95 MULQ 56(SP)
96 ADDQ AX,R14
97 ADCQ DX,R15
98 MOVQ 56(SP),DX
99 IMUL3Q $38,DX,AX
100 MULQ 64(SP)
101 ADDQ AX,SI
102 ADCQ DX,CX
103 MOVQ 56(SP),DX
104 IMUL3Q $38,DX,AX
105 MULQ 72(SP)
106 ADDQ AX,R8
107 ADCQ DX,R9
108 MOVQ 64(SP),DX
109 IMUL3Q $19,DX,AX
110 MULQ 64(SP)
111 ADDQ AX,R8
112 ADCQ DX,R9
113 MOVQ 64(SP),DX
114 IMUL3Q $38,DX,AX
115 MULQ 72(SP)
116 ADDQ AX,R10
117 ADCQ DX,R11
118 MOVQ 72(SP),DX
119 IMUL3Q $19,DX,AX
120 MULQ 72(SP)
121 ADDQ AX,R12
122 ADCQ DX,R13
123 MOVQ $REDMASK51,DX
124 SHLQ $13,CX:SI
125 ANDQ DX,SI
126 SHLQ $13,R9:R8
127 ANDQ DX,R8
128 ADDQ CX,R8
129 SHLQ $13,R11:R10
130 ANDQ DX,R10
131 ADDQ R9,R10
132 SHLQ $13,R13:R12
133 ANDQ DX,R12
134 ADDQ R11,R12
135 SHLQ $13,R15:R14
136 ANDQ DX,R14
137 ADDQ R13,R14
138 IMUL3Q $19,R15,CX
139 ADDQ CX,SI
140 MOVQ SI,CX
141 SHRQ $51,CX
142 ADDQ R8,CX
143 ANDQ DX,SI
144 MOVQ CX,R8
145 SHRQ $51,CX
146 ADDQ R10,CX
147 ANDQ DX,R8
148 MOVQ CX,R9
149 SHRQ $51,CX
150 ADDQ R12,CX
151 ANDQ DX,R9
152 MOVQ CX,AX
153 SHRQ $51,CX
154 ADDQ R14,CX
155 ANDQ DX,AX
156 MOVQ CX,R10
157 SHRQ $51,CX
158 IMUL3Q $19,CX,CX
159 ADDQ CX,SI
160 ANDQ DX,R10
161 MOVQ SI,80(SP)
162 MOVQ R8,88(SP)
163 MOVQ R9,96(SP)
164 MOVQ AX,104(SP)
165 MOVQ R10,112(SP)
166 MOVQ 0(SP),AX
167 MULQ 0(SP)
168 MOVQ AX,SI
169 MOVQ DX,CX
170 MOVQ 0(SP),AX
171 SHLQ $1,AX
172 MULQ 8(SP)
173 MOVQ AX,R8
174 MOVQ DX,R9
175 MOVQ 0(SP),AX
176 SHLQ $1,AX
177 MULQ 16(SP)
178 MOVQ AX,R10
179 MOVQ DX,R11
180 MOVQ 0(SP),AX
181 SHLQ $1,AX
182 MULQ 24(SP)
183 MOVQ AX,R12
184 MOVQ DX,R13
185 MOVQ 0(SP),AX
186 SHLQ $1,AX
187 MULQ 32(SP)
188 MOVQ AX,R14
189 MOVQ DX,R15
190 MOVQ 8(SP),AX
191 MULQ 8(SP)
192 ADDQ AX,R10
193 ADCQ DX,R11
194 MOVQ 8(SP),AX
195 SHLQ $1,AX
196 MULQ 16(SP)
197 ADDQ AX,R12
198 ADCQ DX,R13
199 MOVQ 8(SP),AX
200 SHLQ $1,AX
201 MULQ 24(SP)
202 ADDQ AX,R14
203 ADCQ DX,R15
204 MOVQ 8(SP),DX
205 IMUL3Q $38,DX,AX
206 MULQ 32(SP)
207 ADDQ AX,SI
208 ADCQ DX,CX
209 MOVQ 16(SP),AX
210 MULQ 16(SP)
211 ADDQ AX,R14
212 ADCQ DX,R15
213 MOVQ 16(SP),DX
214 IMUL3Q $38,DX,AX
215 MULQ 24(SP)
216 ADDQ AX,SI
217 ADCQ DX,CX
218 MOVQ 16(SP),DX
219 IMUL3Q $38,DX,AX
220 MULQ 32(SP)
221 ADDQ AX,R8
222 ADCQ DX,R9
223 MOVQ 24(SP),DX
224 IMUL3Q $19,DX,AX
225 MULQ 24(SP)
226 ADDQ AX,R8
227 ADCQ DX,R9
228 MOVQ 24(SP),DX
229 IMUL3Q $38,DX,AX
230 MULQ 32(SP)
231 ADDQ AX,R10
232 ADCQ DX,R11
233 MOVQ 32(SP),DX
234 IMUL3Q $19,DX,AX
235 MULQ 32(SP)
236 ADDQ AX,R12
237 ADCQ DX,R13
238 MOVQ $REDMASK51,DX
239 SHLQ $13,CX:SI
240 ANDQ DX,SI
241 SHLQ $13,R9:R8
242 ANDQ DX,R8
243 ADDQ CX,R8
244 SHLQ $13,R11:R10
245 ANDQ DX,R10
246 ADDQ R9,R10
247 SHLQ $13,R13:R12
248 ANDQ DX,R12
249 ADDQ R11,R12
250 SHLQ $13,R15:R14
251 ANDQ DX,R14
252 ADDQ R13,R14
253 IMUL3Q $19,R15,CX
254 ADDQ CX,SI
255 MOVQ SI,CX
256 SHRQ $51,CX
257 ADDQ R8,CX
258 ANDQ DX,SI
259 MOVQ CX,R8
260 SHRQ $51,CX
261 ADDQ R10,CX
262 ANDQ DX,R8
263 MOVQ CX,R9
264 SHRQ $51,CX
265 ADDQ R12,CX
266 ANDQ DX,R9
267 MOVQ CX,AX
268 SHRQ $51,CX
269 ADDQ R14,CX
270 ANDQ DX,AX
271 MOVQ CX,R10
272 SHRQ $51,CX
273 IMUL3Q $19,CX,CX
274 ADDQ CX,SI
275 ANDQ DX,R10
276 MOVQ SI,120(SP)
277 MOVQ R8,128(SP)
278 MOVQ R9,136(SP)
279 MOVQ AX,144(SP)
280 MOVQ R10,152(SP)
281 MOVQ SI,SI
282 MOVQ R8,DX
283 MOVQ R9,CX
284 MOVQ AX,R8
285 MOVQ R10,R9
286 ADDQ ·_2P0(SB),SI
287 ADDQ ·_2P1234(SB),DX
288 ADDQ ·_2P1234(SB),CX
289 ADDQ ·_2P1234(SB),R8
290 ADDQ ·_2P1234(SB),R9
291 SUBQ 80(SP),SI
292 SUBQ 88(SP),DX
293 SUBQ 96(SP),CX
294 SUBQ 104(SP),R8
295 SUBQ 112(SP),R9
296 MOVQ SI,160(SP)
297 MOVQ DX,168(SP)
298 MOVQ CX,176(SP)
299 MOVQ R8,184(SP)
300 MOVQ R9,192(SP)
301 MOVQ 120(DI),SI
302 MOVQ 128(DI),DX
303 MOVQ 136(DI),CX
304 MOVQ 144(DI),R8
305 MOVQ 152(DI),R9
306 MOVQ SI,AX
307 MOVQ DX,R10
308 MOVQ CX,R11
309 MOVQ R8,R12
310 MOVQ R9,R13
311 ADDQ ·_2P0(SB),AX
312 ADDQ ·_2P1234(SB),R10
313 ADDQ ·_2P1234(SB),R11
314 ADDQ ·_2P1234(SB),R12
315 ADDQ ·_2P1234(SB),R13
316 ADDQ 160(DI),SI
317 ADDQ 168(DI),DX
318 ADDQ 176(DI),CX
319 ADDQ 184(DI),R8
320 ADDQ 192(DI),R9
321 SUBQ 160(DI),AX
322 SUBQ 168(DI),R10
323 SUBQ 176(DI),R11
324 SUBQ 184(DI),R12
325 SUBQ 192(DI),R13
326 MOVQ SI,200(SP)
327 MOVQ DX,208(SP)
328 MOVQ CX,216(SP)
329 MOVQ R8,224(SP)
330 MOVQ R9,232(SP)
331 MOVQ AX,240(SP)
332 MOVQ R10,248(SP)
333 MOVQ R11,256(SP)
334 MOVQ R12,264(SP)
335 MOVQ R13,272(SP)
336 MOVQ 224(SP),SI
337 IMUL3Q $19,SI,AX
338 MOVQ AX,280(SP)
339 MULQ 56(SP)
340 MOVQ AX,SI
341 MOVQ DX,CX
342 MOVQ 232(SP),DX
343 IMUL3Q $19,DX,AX
344 MOVQ AX,288(SP)
345 MULQ 48(SP)
346 ADDQ AX,SI
347 ADCQ DX,CX
348 MOVQ 200(SP),AX
349 MULQ 40(SP)
350 ADDQ AX,SI
351 ADCQ DX,CX
352 MOVQ 200(SP),AX
353 MULQ 48(SP)
354 MOVQ AX,R8
355 MOVQ DX,R9
356 MOVQ 200(SP),AX
357 MULQ 56(SP)
358 MOVQ AX,R10
359 MOVQ DX,R11
360 MOVQ 200(SP),AX
361 MULQ 64(SP)
362 MOVQ AX,R12
363 MOVQ DX,R13
364 MOVQ 200(SP),AX
365 MULQ 72(SP)
366 MOVQ AX,R14
367 MOVQ DX,R15
368 MOVQ 208(SP),AX
369 MULQ 40(SP)
370 ADDQ AX,R8
371 ADCQ DX,R9
372 MOVQ 208(SP),AX
373 MULQ 48(SP)
374 ADDQ AX,R10
375 ADCQ DX,R11
376 MOVQ 208(SP),AX
377 MULQ 56(SP)
378 ADDQ AX,R12
379 ADCQ DX,R13
380 MOVQ 208(SP),AX
381 MULQ 64(SP)
382 ADDQ AX,R14
383 ADCQ DX,R15
384 MOVQ 208(SP),DX
385 IMUL3Q $19,DX,AX
386 MULQ 72(SP)
387 ADDQ AX,SI
388 ADCQ DX,CX
389 MOVQ 216(SP),AX
390 MULQ 40(SP)
391 ADDQ AX,R10
392 ADCQ DX,R11
393 MOVQ 216(SP),AX
394 MULQ 48(SP)
395 ADDQ AX,R12
396 ADCQ DX,R13
397 MOVQ 216(SP),AX
398 MULQ 56(SP)
399 ADDQ AX,R14
400 ADCQ DX,R15
401 MOVQ 216(SP),DX
402 IMUL3Q $19,DX,AX
403 MULQ 64(SP)
404 ADDQ AX,SI
405 ADCQ DX,CX
406 MOVQ 216(SP),DX
407 IMUL3Q $19,DX,AX
408 MULQ 72(SP)
409 ADDQ AX,R8
410 ADCQ DX,R9
411 MOVQ 224(SP),AX
412 MULQ 40(SP)
413 ADDQ AX,R12
414 ADCQ DX,R13
415 MOVQ 224(SP),AX
416 MULQ 48(SP)
417 ADDQ AX,R14
418 ADCQ DX,R15
419 MOVQ 280(SP),AX
420 MULQ 64(SP)
421 ADDQ AX,R8
422 ADCQ DX,R9
423 MOVQ 280(SP),AX
424 MULQ 72(SP)
425 ADDQ AX,R10
426 ADCQ DX,R11
427 MOVQ 232(SP),AX
428 MULQ 40(SP)
429 ADDQ AX,R14
430 ADCQ DX,R15
431 MOVQ 288(SP),AX
432 MULQ 56(SP)
433 ADDQ AX,R8
434 ADCQ DX,R9
435 MOVQ 288(SP),AX
436 MULQ 64(SP)
437 ADDQ AX,R10
438 ADCQ DX,R11
439 MOVQ 288(SP),AX
440 MULQ 72(SP)
441 ADDQ AX,R12
442 ADCQ DX,R13
443 MOVQ $REDMASK51,DX
444 SHLQ $13,CX:SI
445 ANDQ DX,SI
446 SHLQ $13,R9:R8
447 ANDQ DX,R8
448 ADDQ CX,R8
449 SHLQ $13,R11:R10
450 ANDQ DX,R10
451 ADDQ R9,R10
452 SHLQ $13,R13:R12
453 ANDQ DX,R12
454 ADDQ R11,R12
455 SHLQ $13,R15:R14
456 ANDQ DX,R14
457 ADDQ R13,R14
458 IMUL3Q $19,R15,CX
459 ADDQ CX,SI
460 MOVQ SI,CX
461 SHRQ $51,CX
462 ADDQ R8,CX
463 MOVQ CX,R8
464 SHRQ $51,CX
465 ANDQ DX,SI
466 ADDQ R10,CX
467 MOVQ CX,R9
468 SHRQ $51,CX
469 ANDQ DX,R8
470 ADDQ R12,CX
471 MOVQ CX,AX
472 SHRQ $51,CX
473 ANDQ DX,R9
474 ADDQ R14,CX
475 MOVQ CX,R10
476 SHRQ $51,CX
477 ANDQ DX,AX
478 IMUL3Q $19,CX,CX
479 ADDQ CX,SI
480 ANDQ DX,R10
481 MOVQ SI,40(SP)
482 MOVQ R8,48(SP)
483 MOVQ R9,56(SP)
484 MOVQ AX,64(SP)
485 MOVQ R10,72(SP)
486 MOVQ 264(SP),SI
487 IMUL3Q $19,SI,AX
488 MOVQ AX,200(SP)
489 MULQ 16(SP)
490 MOVQ AX,SI
491 MOVQ DX,CX
492 MOVQ 272(SP),DX
493 IMUL3Q $19,DX,AX
494 MOVQ AX,208(SP)
495 MULQ 8(SP)
496 ADDQ AX,SI
497 ADCQ DX,CX
498 MOVQ 240(SP),AX
499 MULQ 0(SP)
500 ADDQ AX,SI
501 ADCQ DX,CX
502 MOVQ 240(SP),AX
503 MULQ 8(SP)
504 MOVQ AX,R8
505 MOVQ DX,R9
506 MOVQ 240(SP),AX
507 MULQ 16(SP)
508 MOVQ AX,R10
509 MOVQ DX,R11
510 MOVQ 240(SP),AX
511 MULQ 24(SP)
512 MOVQ AX,R12
513 MOVQ DX,R13
514 MOVQ 240(SP),AX
515 MULQ 32(SP)
516 MOVQ AX,R14
517 MOVQ DX,R15
518 MOVQ 248(SP),AX
519 MULQ 0(SP)
520 ADDQ AX,R8
521 ADCQ DX,R9
522 MOVQ 248(SP),AX
523 MULQ 8(SP)
524 ADDQ AX,R10
525 ADCQ DX,R11
526 MOVQ 248(SP),AX
527 MULQ 16(SP)
528 ADDQ AX,R12
529 ADCQ DX,R13
530 MOVQ 248(SP),AX
531 MULQ 24(SP)
532 ADDQ AX,R14
533 ADCQ DX,R15
534 MOVQ 248(SP),DX
535 IMUL3Q $19,DX,AX
536 MULQ 32(SP)
537 ADDQ AX,SI
538 ADCQ DX,CX
539 MOVQ 256(SP),AX
540 MULQ 0(SP)
541 ADDQ AX,R10
542 ADCQ DX,R11
543 MOVQ 256(SP),AX
544 MULQ 8(SP)
545 ADDQ AX,R12
546 ADCQ DX,R13
547 MOVQ 256(SP),AX
548 MULQ 16(SP)
549 ADDQ AX,R14
550 ADCQ DX,R15
551 MOVQ 256(SP),DX
552 IMUL3Q $19,DX,AX
553 MULQ 24(SP)
554 ADDQ AX,SI
555 ADCQ DX,CX
556 MOVQ 256(SP),DX
557 IMUL3Q $19,DX,AX
558 MULQ 32(SP)
559 ADDQ AX,R8
560 ADCQ DX,R9
561 MOVQ 264(SP),AX
562 MULQ 0(SP)
563 ADDQ AX,R12
564 ADCQ DX,R13
565 MOVQ 264(SP),AX
566 MULQ 8(SP)
567 ADDQ AX,R14
568 ADCQ DX,R15
569 MOVQ 200(SP),AX
570 MULQ 24(SP)
571 ADDQ AX,R8
572 ADCQ DX,R9
573 MOVQ 200(SP),AX
574 MULQ 32(SP)
575 ADDQ AX,R10
576 ADCQ DX,R11
577 MOVQ 272(SP),AX
578 MULQ 0(SP)
579 ADDQ AX,R14
580 ADCQ DX,R15
581 MOVQ 208(SP),AX
582 MULQ 16(SP)
583 ADDQ AX,R8
584 ADCQ DX,R9
585 MOVQ 208(SP),AX
586 MULQ 24(SP)
587 ADDQ AX,R10
588 ADCQ DX,R11
589 MOVQ 208(SP),AX
590 MULQ 32(SP)
591 ADDQ AX,R12
592 ADCQ DX,R13
593 MOVQ $REDMASK51,DX
594 SHLQ $13,CX:SI
595 ANDQ DX,SI
596 SHLQ $13,R9:R8
597 ANDQ DX,R8
598 ADDQ CX,R8
599 SHLQ $13,R11:R10
600 ANDQ DX,R10
601 ADDQ R9,R10
602 SHLQ $13,R13:R12
603 ANDQ DX,R12
604 ADDQ R11,R12
605 SHLQ $13,R15:R14
606 ANDQ DX,R14
607 ADDQ R13,R14
608 IMUL3Q $19,R15,CX
609 ADDQ CX,SI
610 MOVQ SI,CX
611 SHRQ $51,CX
612 ADDQ R8,CX
613 MOVQ CX,R8
614 SHRQ $51,CX
615 ANDQ DX,SI
616 ADDQ R10,CX
617 MOVQ CX,R9
618 SHRQ $51,CX
619 ANDQ DX,R8
620 ADDQ R12,CX
621 MOVQ CX,AX
622 SHRQ $51,CX
623 ANDQ DX,R9
624 ADDQ R14,CX
625 MOVQ CX,R10
626 SHRQ $51,CX
627 ANDQ DX,AX
628 IMUL3Q $19,CX,CX
629 ADDQ CX,SI
630 ANDQ DX,R10
631 MOVQ SI,DX
632 MOVQ R8,CX
633 MOVQ R9,R11
634 MOVQ AX,R12
635 MOVQ R10,R13
636 ADDQ ·_2P0(SB),DX
637 ADDQ ·_2P1234(SB),CX
638 ADDQ ·_2P1234(SB),R11
639 ADDQ ·_2P1234(SB),R12
640 ADDQ ·_2P1234(SB),R13
641 ADDQ 40(SP),SI
642 ADDQ 48(SP),R8
643 ADDQ 56(SP),R9
644 ADDQ 64(SP),AX
645 ADDQ 72(SP),R10
646 SUBQ 40(SP),DX
647 SUBQ 48(SP),CX
648 SUBQ 56(SP),R11
649 SUBQ 64(SP),R12
650 SUBQ 72(SP),R13
651 MOVQ SI,120(DI)
652 MOVQ R8,128(DI)
653 MOVQ R9,136(DI)
654 MOVQ AX,144(DI)
655 MOVQ R10,152(DI)
656 MOVQ DX,160(DI)
657 MOVQ CX,168(DI)
658 MOVQ R11,176(DI)
659 MOVQ R12,184(DI)
660 MOVQ R13,192(DI)
661 MOVQ 120(DI),AX
662 MULQ 120(DI)
663 MOVQ AX,SI
664 MOVQ DX,CX
665 MOVQ 120(DI),AX
666 SHLQ $1,AX
667 MULQ 128(DI)
668 MOVQ AX,R8
669 MOVQ DX,R9
670 MOVQ 120(DI),AX
671 SHLQ $1,AX
672 MULQ 136(DI)
673 MOVQ AX,R10
674 MOVQ DX,R11
675 MOVQ 120(DI),AX
676 SHLQ $1,AX
677 MULQ 144(DI)
678 MOVQ AX,R12
679 MOVQ DX,R13
680 MOVQ 120(DI),AX
681 SHLQ $1,AX
682 MULQ 152(DI)
683 MOVQ AX,R14
684 MOVQ DX,R15
685 MOVQ 128(DI),AX
686 MULQ 128(DI)
687 ADDQ AX,R10
688 ADCQ DX,R11
689 MOVQ 128(DI),AX
690 SHLQ $1,AX
691 MULQ 136(DI)
692 ADDQ AX,R12
693 ADCQ DX,R13
694 MOVQ 128(DI),AX
695 SHLQ $1,AX
696 MULQ 144(DI)
697 ADDQ AX,R14
698 ADCQ DX,R15
699 MOVQ 128(DI),DX
700 IMUL3Q $38,DX,AX
701 MULQ 152(DI)
702 ADDQ AX,SI
703 ADCQ DX,CX
704 MOVQ 136(DI),AX
705 MULQ 136(DI)
706 ADDQ AX,R14
707 ADCQ DX,R15
708 MOVQ 136(DI),DX
709 IMUL3Q $38,DX,AX
710 MULQ 144(DI)
711 ADDQ AX,SI
712 ADCQ DX,CX
713 MOVQ 136(DI),DX
714 IMUL3Q $38,DX,AX
715 MULQ 152(DI)
716 ADDQ AX,R8
717 ADCQ DX,R9
718 MOVQ 144(DI),DX
719 IMUL3Q $19,DX,AX
720 MULQ 144(DI)
721 ADDQ AX,R8
722 ADCQ DX,R9
723 MOVQ 144(DI),DX
724 IMUL3Q $38,DX,AX
725 MULQ 152(DI)
726 ADDQ AX,R10
727 ADCQ DX,R11
728 MOVQ 152(DI),DX
729 IMUL3Q $19,DX,AX
730 MULQ 152(DI)
731 ADDQ AX,R12
732 ADCQ DX,R13
733 MOVQ $REDMASK51,DX
734 SHLQ $13,CX:SI
735 ANDQ DX,SI
736 SHLQ $13,R9:R8
737 ANDQ DX,R8
738 ADDQ CX,R8
739 SHLQ $13,R11:R10
740 ANDQ DX,R10
741 ADDQ R9,R10
742 SHLQ $13,R13:R12
743 ANDQ DX,R12
744 ADDQ R11,R12
745 SHLQ $13,R15:R14
746 ANDQ DX,R14
747 ADDQ R13,R14
748 IMUL3Q $19,R15,CX
749 ADDQ CX,SI
750 MOVQ SI,CX
751 SHRQ $51,CX
752 ADDQ R8,CX
753 ANDQ DX,SI
754 MOVQ CX,R8
755 SHRQ $51,CX
756 ADDQ R10,CX
757 ANDQ DX,R8
758 MOVQ CX,R9
759 SHRQ $51,CX
760 ADDQ R12,CX
761 ANDQ DX,R9
762 MOVQ CX,AX
763 SHRQ $51,CX
764 ADDQ R14,CX
765 ANDQ DX,AX
766 MOVQ CX,R10
767 SHRQ $51,CX
768 IMUL3Q $19,CX,CX
769 ADDQ CX,SI
770 ANDQ DX,R10
771 MOVQ SI,120(DI)
772 MOVQ R8,128(DI)
773 MOVQ R9,136(DI)
774 MOVQ AX,144(DI)
775 MOVQ R10,152(DI)
776 MOVQ 160(DI),AX
777 MULQ 160(DI)
778 MOVQ AX,SI
779 MOVQ DX,CX
780 MOVQ 160(DI),AX
781 SHLQ $1,AX
782 MULQ 168(DI)
783 MOVQ AX,R8
784 MOVQ DX,R9
785 MOVQ 160(DI),AX
786 SHLQ $1,AX
787 MULQ 176(DI)
788 MOVQ AX,R10
789 MOVQ DX,R11
790 MOVQ 160(DI),AX
791 SHLQ $1,AX
792 MULQ 184(DI)
793 MOVQ AX,R12
794 MOVQ DX,R13
795 MOVQ 160(DI),AX
796 SHLQ $1,AX
797 MULQ 192(DI)
798 MOVQ AX,R14
799 MOVQ DX,R15
800 MOVQ 168(DI),AX
801 MULQ 168(DI)
802 ADDQ AX,R10
803 ADCQ DX,R11
804 MOVQ 168(DI),AX
805 SHLQ $1,AX
806 MULQ 176(DI)
807 ADDQ AX,R12
808 ADCQ DX,R13
809 MOVQ 168(DI),AX
810 SHLQ $1,AX
811 MULQ 184(DI)
812 ADDQ AX,R14
813 ADCQ DX,R15
814 MOVQ 168(DI),DX
815 IMUL3Q $38,DX,AX
816 MULQ 192(DI)
817 ADDQ AX,SI
818 ADCQ DX,CX
819 MOVQ 176(DI),AX
820 MULQ 176(DI)
821 ADDQ AX,R14
822 ADCQ DX,R15
823 MOVQ 176(DI),DX
824 IMUL3Q $38,DX,AX
825 MULQ 184(DI)
826 ADDQ AX,SI
827 ADCQ DX,CX
828 MOVQ 176(DI),DX
829 IMUL3Q $38,DX,AX
830 MULQ 192(DI)
831 ADDQ AX,R8
832 ADCQ DX,R9
833 MOVQ 184(DI),DX
834 IMUL3Q $19,DX,AX
835 MULQ 184(DI)
836 ADDQ AX,R8
837 ADCQ DX,R9
838 MOVQ 184(DI),DX
839 IMUL3Q $38,DX,AX
840 MULQ 192(DI)
841 ADDQ AX,R10
842 ADCQ DX,R11
843 MOVQ 192(DI),DX
844 IMUL3Q $19,DX,AX
845 MULQ 192(DI)
846 ADDQ AX,R12
847 ADCQ DX,R13
848 MOVQ $REDMASK51,DX
849 SHLQ $13,CX:SI
850 ANDQ DX,SI
851 SHLQ $13,R9:R8
852 ANDQ DX,R8
853 ADDQ CX,R8
854 SHLQ $13,R11:R10
855 ANDQ DX,R10
856 ADDQ R9,R10
857 SHLQ $13,R13:R12
858 ANDQ DX,R12
859 ADDQ R11,R12
860 SHLQ $13,R15:R14
861 ANDQ DX,R14
862 ADDQ R13,R14
863 IMUL3Q $19,R15,CX
864 ADDQ CX,SI
865 MOVQ SI,CX
866 SHRQ $51,CX
867 ADDQ R8,CX
868 ANDQ DX,SI
869 MOVQ CX,R8
870 SHRQ $51,CX
871 ADDQ R10,CX
872 ANDQ DX,R8
873 MOVQ CX,R9
874 SHRQ $51,CX
875 ADDQ R12,CX
876 ANDQ DX,R9
877 MOVQ CX,AX
878 SHRQ $51,CX
879 ADDQ R14,CX
880 ANDQ DX,AX
881 MOVQ CX,R10
882 SHRQ $51,CX
883 IMUL3Q $19,CX,CX
884 ADDQ CX,SI
885 ANDQ DX,R10
886 MOVQ SI,160(DI)
887 MOVQ R8,168(DI)
888 MOVQ R9,176(DI)
889 MOVQ AX,184(DI)
890 MOVQ R10,192(DI)
891 MOVQ 184(DI),SI
892 IMUL3Q $19,SI,AX
893 MOVQ AX,0(SP)
894 MULQ 16(DI)
895 MOVQ AX,SI
896 MOVQ DX,CX
897 MOVQ 192(DI),DX
898 IMUL3Q $19,DX,AX
899 MOVQ AX,8(SP)
900 MULQ 8(DI)
901 ADDQ AX,SI
902 ADCQ DX,CX
903 MOVQ 160(DI),AX
904 MULQ 0(DI)
905 ADDQ AX,SI
906 ADCQ DX,CX
907 MOVQ 160(DI),AX
908 MULQ 8(DI)
909 MOVQ AX,R8
910 MOVQ DX,R9
911 MOVQ 160(DI),AX
912 MULQ 16(DI)
913 MOVQ AX,R10
914 MOVQ DX,R11
915 MOVQ 160(DI),AX
916 MULQ 24(DI)
917 MOVQ AX,R12
918 MOVQ DX,R13
919 MOVQ 160(DI),AX
920 MULQ 32(DI)
921 MOVQ AX,R14
922 MOVQ DX,R15
923 MOVQ 168(DI),AX
924 MULQ 0(DI)
925 ADDQ AX,R8
926 ADCQ DX,R9
927 MOVQ 168(DI),AX
928 MULQ 8(DI)
929 ADDQ AX,R10
930 ADCQ DX,R11
931 MOVQ 168(DI),AX
932 MULQ 16(DI)
933 ADDQ AX,R12
934 ADCQ DX,R13
935 MOVQ 168(DI),AX
936 MULQ 24(DI)
937 ADDQ AX,R14
938 ADCQ DX,R15
939 MOVQ 168(DI),DX
940 IMUL3Q $19,DX,AX
941 MULQ 32(DI)
942 ADDQ AX,SI
943 ADCQ DX,CX
944 MOVQ 176(DI),AX
945 MULQ 0(DI)
946 ADDQ AX,R10
947 ADCQ DX,R11
948 MOVQ 176(DI),AX
949 MULQ 8(DI)
950 ADDQ AX,R12
951 ADCQ DX,R13
952 MOVQ 176(DI),AX
953 MULQ 16(DI)
954 ADDQ AX,R14
955 ADCQ DX,R15
956 MOVQ 176(DI),DX
957 IMUL3Q $19,DX,AX
958 MULQ 24(DI)
959 ADDQ AX,SI
960 ADCQ DX,CX
961 MOVQ 176(DI),DX
962 IMUL3Q $19,DX,AX
963 MULQ 32(DI)
964 ADDQ AX,R8
965 ADCQ DX,R9
966 MOVQ 184(DI),AX
967 MULQ 0(DI)
968 ADDQ AX,R12
969 ADCQ DX,R13
970 MOVQ 184(DI),AX
971 MULQ 8(DI)
972 ADDQ AX,R14
973 ADCQ DX,R15
974 MOVQ 0(SP),AX
975 MULQ 24(DI)
976 ADDQ AX,R8
977 ADCQ DX,R9
978 MOVQ 0(SP),AX
979 MULQ 32(DI)
980 ADDQ AX,R10
981 ADCQ DX,R11
982 MOVQ 192(DI),AX
983 MULQ 0(DI)
984 ADDQ AX,R14
985 ADCQ DX,R15
986 MOVQ 8(SP),AX
987 MULQ 16(DI)
988 ADDQ AX,R8
989 ADCQ DX,R9
990 MOVQ 8(SP),AX
991 MULQ 24(DI)
992 ADDQ AX,R10
993 ADCQ DX,R11
994 MOVQ 8(SP),AX
995 MULQ 32(DI)
996 ADDQ AX,R12
997 ADCQ DX,R13
998 MOVQ $REDMASK51,DX
999 SHLQ $13,CX:SI
1000 ANDQ DX,SI
1001 SHLQ $13,R9:R8
1002 ANDQ DX,R8
1003 ADDQ CX,R8
1004 SHLQ $13,R11:R10
1005 ANDQ DX,R10
1006 ADDQ R9,R10
1007 SHLQ $13,R13:R12
1008 ANDQ DX,R12
1009 ADDQ R11,R12
1010 SHLQ $13,R15:R14
1011 ANDQ DX,R14
1012 ADDQ R13,R14
1013 IMUL3Q $19,R15,CX
1014 ADDQ CX,SI
1015 MOVQ SI,CX
1016 SHRQ $51,CX
1017 ADDQ R8,CX
1018 MOVQ CX,R8
1019 SHRQ $51,CX
1020 ANDQ DX,SI
1021 ADDQ R10,CX
1022 MOVQ CX,R9
1023 SHRQ $51,CX
1024 ANDQ DX,R8
1025 ADDQ R12,CX
1026 MOVQ CX,AX
1027 SHRQ $51,CX
1028 ANDQ DX,R9
1029 ADDQ R14,CX
1030 MOVQ CX,R10
1031 SHRQ $51,CX
1032 ANDQ DX,AX
1033 IMUL3Q $19,CX,CX
1034 ADDQ CX,SI
1035 ANDQ DX,R10
1036 MOVQ SI,160(DI)
1037 MOVQ R8,168(DI)
1038 MOVQ R9,176(DI)
1039 MOVQ AX,184(DI)
1040 MOVQ R10,192(DI)
1041 MOVQ 144(SP),SI
1042 IMUL3Q $19,SI,AX
1043 MOVQ AX,0(SP)
1044 MULQ 96(SP)
1045 MOVQ AX,SI
1046 MOVQ DX,CX
1047 MOVQ 152(SP),DX
1048 IMUL3Q $19,DX,AX
1049 MOVQ AX,8(SP)
1050 MULQ 88(SP)
1051 ADDQ AX,SI
1052 ADCQ DX,CX
1053 MOVQ 120(SP),AX
1054 MULQ 80(SP)
1055 ADDQ AX,SI
1056 ADCQ DX,CX
1057 MOVQ 120(SP),AX
1058 MULQ 88(SP)
1059 MOVQ AX,R8
1060 MOVQ DX,R9
1061 MOVQ 120(SP),AX
1062 MULQ 96(SP)
1063 MOVQ AX,R10
1064 MOVQ DX,R11
1065 MOVQ 120(SP),AX
1066 MULQ 104(SP)
1067 MOVQ AX,R12
1068 MOVQ DX,R13
1069 MOVQ 120(SP),AX
1070 MULQ 112(SP)
1071 MOVQ AX,R14
1072 MOVQ DX,R15
1073 MOVQ 128(SP),AX
1074 MULQ 80(SP)
1075 ADDQ AX,R8
1076 ADCQ DX,R9
1077 MOVQ 128(SP),AX
1078 MULQ 88(SP)
1079 ADDQ AX,R10
1080 ADCQ DX,R11
1081 MOVQ 128(SP),AX
1082 MULQ 96(SP)
1083 ADDQ AX,R12
1084 ADCQ DX,R13
1085 MOVQ 128(SP),AX
1086 MULQ 104(SP)
1087 ADDQ AX,R14
1088 ADCQ DX,R15
1089 MOVQ 128(SP),DX
1090 IMUL3Q $19,DX,AX
1091 MULQ 112(SP)
1092 ADDQ AX,SI
1093 ADCQ DX,CX
1094 MOVQ 136(SP),AX
1095 MULQ 80(SP)
1096 ADDQ AX,R10
1097 ADCQ DX,R11
1098 MOVQ 136(SP),AX
1099 MULQ 88(SP)
1100 ADDQ AX,R12
1101 ADCQ DX,R13
1102 MOVQ 136(SP),AX
1103 MULQ 96(SP)
1104 ADDQ AX,R14
1105 ADCQ DX,R15
1106 MOVQ 136(SP),DX
1107 IMUL3Q $19,DX,AX
1108 MULQ 104(SP)
1109 ADDQ AX,SI
1110 ADCQ DX,CX
1111 MOVQ 136(SP),DX
1112 IMUL3Q $19,DX,AX
1113 MULQ 112(SP)
1114 ADDQ AX,R8
1115 ADCQ DX,R9
1116 MOVQ 144(SP),AX
1117 MULQ 80(SP)
1118 ADDQ AX,R12
1119 ADCQ DX,R13
1120 MOVQ 144(SP),AX
1121 MULQ 88(SP)
1122 ADDQ AX,R14
1123 ADCQ DX,R15
1124 MOVQ 0(SP),AX
1125 MULQ 104(SP)
1126 ADDQ AX,R8
1127 ADCQ DX,R9
1128 MOVQ 0(SP),AX
1129 MULQ 112(SP)
1130 ADDQ AX,R10
1131 ADCQ DX,R11
1132 MOVQ 152(SP),AX
1133 MULQ 80(SP)
1134 ADDQ AX,R14
1135 ADCQ DX,R15
1136 MOVQ 8(SP),AX
1137 MULQ 96(SP)
1138 ADDQ AX,R8
1139 ADCQ DX,R9
1140 MOVQ 8(SP),AX
1141 MULQ 104(SP)
1142 ADDQ AX,R10
1143 ADCQ DX,R11
1144 MOVQ 8(SP),AX
1145 MULQ 112(SP)
1146 ADDQ AX,R12
1147 ADCQ DX,R13
1148 MOVQ $REDMASK51,DX
1149 SHLQ $13,CX:SI
1150 ANDQ DX,SI
1151 SHLQ $13,R9:R8
1152 ANDQ DX,R8
1153 ADDQ CX,R8
1154 SHLQ $13,R11:R10
1155 ANDQ DX,R10
1156 ADDQ R9,R10
1157 SHLQ $13,R13:R12
1158 ANDQ DX,R12
1159 ADDQ R11,R12
1160 SHLQ $13,R15:R14
1161 ANDQ DX,R14
1162 ADDQ R13,R14
1163 IMUL3Q $19,R15,CX
1164 ADDQ CX,SI
1165 MOVQ SI,CX
1166 SHRQ $51,CX
1167 ADDQ R8,CX
1168 MOVQ CX,R8
1169 SHRQ $51,CX
1170 ANDQ DX,SI
1171 ADDQ R10,CX
1172 MOVQ CX,R9
1173 SHRQ $51,CX
1174 ANDQ DX,R8
1175 ADDQ R12,CX
1176 MOVQ CX,AX
1177 SHRQ $51,CX
1178 ANDQ DX,R9
1179 ADDQ R14,CX
1180 MOVQ CX,R10
1181 SHRQ $51,CX
1182 ANDQ DX,AX
1183 IMUL3Q $19,CX,CX
1184 ADDQ CX,SI
1185 ANDQ DX,R10
1186 MOVQ SI,40(DI)
1187 MOVQ R8,48(DI)
1188 MOVQ R9,56(DI)
1189 MOVQ AX,64(DI)
1190 MOVQ R10,72(DI)
1191 MOVQ 160(SP),AX
1192 MULQ ·_121666_213(SB)
1193 SHRQ $13,AX
1194 MOVQ AX,SI
1195 MOVQ DX,CX
1196 MOVQ 168(SP),AX
1197 MULQ ·_121666_213(SB)
1198 SHRQ $13,AX
1199 ADDQ AX,CX
1200 MOVQ DX,R8
1201 MOVQ 176(SP),AX
1202 MULQ ·_121666_213(SB)
1203 SHRQ $13,AX
1204 ADDQ AX,R8
1205 MOVQ DX,R9
1206 MOVQ 184(SP),AX
1207 MULQ ·_121666_213(SB)
1208 SHRQ $13,AX
1209 ADDQ AX,R9
1210 MOVQ DX,R10
1211 MOVQ 192(SP),AX
1212 MULQ ·_121666_213(SB)
1213 SHRQ $13,AX
1214 ADDQ AX,R10
1215 IMUL3Q $19,DX,DX
1216 ADDQ DX,SI
1217 ADDQ 80(SP),SI
1218 ADDQ 88(SP),CX
1219 ADDQ 96(SP),R8
1220 ADDQ 104(SP),R9
1221 ADDQ 112(SP),R10
1222 MOVQ SI,80(DI)
1223 MOVQ CX,88(DI)
1224 MOVQ R8,96(DI)
1225 MOVQ R9,104(DI)
1226 MOVQ R10,112(DI)
1227 MOVQ 104(DI),SI
1228 IMUL3Q $19,SI,AX
1229 MOVQ AX,0(SP)
1230 MULQ 176(SP)
1231 MOVQ AX,SI
1232 MOVQ DX,CX
1233 MOVQ 112(DI),DX
1234 IMUL3Q $19,DX,AX
1235 MOVQ AX,8(SP)
1236 MULQ 168(SP)
1237 ADDQ AX,SI
1238 ADCQ DX,CX
1239 MOVQ 80(DI),AX
1240 MULQ 160(SP)
1241 ADDQ AX,SI
1242 ADCQ DX,CX
1243 MOVQ 80(DI),AX
1244 MULQ 168(SP)
1245 MOVQ AX,R8
1246 MOVQ DX,R9
1247 MOVQ 80(DI),AX
1248 MULQ 176(SP)
1249 MOVQ AX,R10
1250 MOVQ DX,R11
1251 MOVQ 80(DI),AX
1252 MULQ 184(SP)
1253 MOVQ AX,R12
1254 MOVQ DX,R13
1255 MOVQ 80(DI),AX
1256 MULQ 192(SP)
1257 MOVQ AX,R14
1258 MOVQ DX,R15
1259 MOVQ 88(DI),AX
1260 MULQ 160(SP)
1261 ADDQ AX,R8
1262 ADCQ DX,R9
1263 MOVQ 88(DI),AX
1264 MULQ 168(SP)
1265 ADDQ AX,R10
1266 ADCQ DX,R11
1267 MOVQ 88(DI),AX
1268 MULQ 176(SP)
1269 ADDQ AX,R12
1270 ADCQ DX,R13
1271 MOVQ 88(DI),AX
1272 MULQ 184(SP)
1273 ADDQ AX,R14
1274 ADCQ DX,R15
1275 MOVQ 88(DI),DX
1276 IMUL3Q $19,DX,AX
1277 MULQ 192(SP)
1278 ADDQ AX,SI
1279 ADCQ DX,CX
1280 MOVQ 96(DI),AX
1281 MULQ 160(SP)
1282 ADDQ AX,R10
1283 ADCQ DX,R11
1284 MOVQ 96(DI),AX
1285 MULQ 168(SP)
1286 ADDQ AX,R12
1287 ADCQ DX,R13
1288 MOVQ 96(DI),AX
1289 MULQ 176(SP)
1290 ADDQ AX,R14
1291 ADCQ DX,R15
1292 MOVQ 96(DI),DX
1293 IMUL3Q $19,DX,AX
1294 MULQ 184(SP)
1295 ADDQ AX,SI
1296 ADCQ DX,CX
1297 MOVQ 96(DI),DX
1298 IMUL3Q $19,DX,AX
1299 MULQ 192(SP)
1300 ADDQ AX,R8
1301 ADCQ DX,R9
1302 MOVQ 104(DI),AX
1303 MULQ 160(SP)
1304 ADDQ AX,R12
1305 ADCQ DX,R13
1306 MOVQ 104(DI),AX
1307 MULQ 168(SP)
1308 ADDQ AX,R14
1309 ADCQ DX,R15
1310 MOVQ 0(SP),AX
1311 MULQ 184(SP)
1312 ADDQ AX,R8
1313 ADCQ DX,R9
1314 MOVQ 0(SP),AX
1315 MULQ 192(SP)
1316 ADDQ AX,R10
1317 ADCQ DX,R11
1318 MOVQ 112(DI),AX
1319 MULQ 160(SP)
1320 ADDQ AX,R14
1321 ADCQ DX,R15
1322 MOVQ 8(SP),AX
1323 MULQ 176(SP)
1324 ADDQ AX,R8
1325 ADCQ DX,R9
1326 MOVQ 8(SP),AX
1327 MULQ 184(SP)
1328 ADDQ AX,R10
1329 ADCQ DX,R11
1330 MOVQ 8(SP),AX
1331 MULQ 192(SP)
1332 ADDQ AX,R12
1333 ADCQ DX,R13
1334 MOVQ $REDMASK51,DX
1335 SHLQ $13,CX:SI
1336 ANDQ DX,SI
1337 SHLQ $13,R9:R8
1338 ANDQ DX,R8
1339 ADDQ CX,R8
1340 SHLQ $13,R11:R10
1341 ANDQ DX,R10
1342 ADDQ R9,R10
1343 SHLQ $13,R13:R12
1344 ANDQ DX,R12
1345 ADDQ R11,R12
1346 SHLQ $13,R15:R14
1347 ANDQ DX,R14
1348 ADDQ R13,R14
1349 IMUL3Q $19,R15,CX
1350 ADDQ CX,SI
1351 MOVQ SI,CX
1352 SHRQ $51,CX
1353 ADDQ R8,CX
1354 MOVQ CX,R8
1355 SHRQ $51,CX
1356 ANDQ DX,SI
1357 ADDQ R10,CX
1358 MOVQ CX,R9
1359 SHRQ $51,CX
1360 ANDQ DX,R8
1361 ADDQ R12,CX
1362 MOVQ CX,AX
1363 SHRQ $51,CX
1364 ANDQ DX,R9
1365 ADDQ R14,CX
1366 MOVQ CX,R10
1367 SHRQ $51,CX
1368 ANDQ DX,AX
1369 IMUL3Q $19,CX,CX
1370 ADDQ CX,SI
1371 ANDQ DX,R10
1372 MOVQ SI,80(DI)
1373 MOVQ R8,88(DI)
1374 MOVQ R9,96(DI)
1375 MOVQ AX,104(DI)
1376 MOVQ R10,112(DI)
1377 RET
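
Every reduction block in ladderstep shifts the 128-bit products down by 51 bits and masks with REDMASK51, folding the top carry back into the lowest limb times 19 (since 2^255 is congruent to 19 mod 2^255 - 19). Assuming REDMASK51 is the 51-bit mask 2^51 - 1 (its definition lives in const_amd64.h, outside this hunk), one carry pass looks like this in Go:

    package main

    import "fmt"

    const redMask51 = (1 << 51) - 1 // assumed value of REDMASK51

    // carryChain does a single carry pass over five radix-2^51 limbs,
    // folding the final carry back into limb 0 multiplied by 19, the
    // same SHRQ/ANDQ/IMUL3Q $19 pattern as the assembly. One pass does
    // not fully canonicalize; that is what freeze is for.
    func carryChain(r *[5]uint64) {
    	var c uint64
    	for i := 0; i < 5; i++ {
    		c = r[i] >> 51
    		r[i] &= redMask51
    		if i < 4 {
    			r[i+1] += c
    		}
    	}
    	r[0] += 19 * c
    }

    func main() {
    	r := [5]uint64{1 << 52, 0, 0, 0, 0}
    	carryChain(&r)
    	fmt.Println(r) // [0 2 0 0 0]
    }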
diff --git a/vendor/golang.org/x/crypto/curve25519/mont25519_amd64.go b/vendor/golang.org/x/crypto/curve25519/mont25519_amd64.go
new file mode 100644
index 0000000..5822bd5
--- /dev/null
+++ b/vendor/golang.org/x/crypto/curve25519/mont25519_amd64.go
@@ -0,0 +1,240 @@
1// Copyright 2012 The Go Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5// +build amd64,!gccgo,!appengine
6
7package curve25519
8
9// These functions are implemented in the .s files. The names of the functions
10// in the rest of the file are also taken from the SUPERCOP sources to help
11// people following along.
12
13//go:noescape
14
15func cswap(inout *[5]uint64, v uint64)
16
17//go:noescape
18
19func ladderstep(inout *[5][5]uint64)
20
21//go:noescape
22
23func freeze(inout *[5]uint64)
24
25//go:noescape
26
27func mul(dest, a, b *[5]uint64)
28
29//go:noescape
30
31func square(out, in *[5]uint64)
32
33// mladder uses a Montgomery ladder to calculate (xr/zr) *= s.
34func mladder(xr, zr *[5]uint64, s *[32]byte) {
35 var work [5][5]uint64
36
37 work[0] = *xr
38 setint(&work[1], 1)
39 setint(&work[2], 0)
40 work[3] = *xr
41 setint(&work[4], 1)
42
43 j := uint(6)
44 var prevbit byte
45
46 for i := 31; i >= 0; i-- {
47 for j < 8 {
48 bit := ((*s)[i] >> j) & 1
49 swap := bit ^ prevbit
50 prevbit = bit
51 cswap(&work[1], uint64(swap))
52 ladderstep(&work)
53 j--
54 }
55 j = 7
56 }
57
58 *xr = work[1]
59 *zr = work[2]
60}
61
62func scalarMult(out, in, base *[32]byte) {
63 var e [32]byte
64 copy(e[:], (*in)[:])
65 e[0] &= 248
66 e[31] &= 127
67 e[31] |= 64
68
69 var t, z [5]uint64
70 unpack(&t, base)
71 mladder(&t, &z, &e)
72 invert(&z, &z)
73 mul(&t, &t, &z)
74 pack(out, &t)
75}
76
77func setint(r *[5]uint64, v uint64) {
78 r[0] = v
79 r[1] = 0
80 r[2] = 0
81 r[3] = 0
82 r[4] = 0
83}
84
85// unpack sets r = x where r consists of five 51-bit limbs in little-endian
86// order.
87func unpack(r *[5]uint64, x *[32]byte) {
88 r[0] = uint64(x[0]) |
89 uint64(x[1])<<8 |
90 uint64(x[2])<<16 |
91 uint64(x[3])<<24 |
92 uint64(x[4])<<32 |
93 uint64(x[5])<<40 |
94 uint64(x[6]&7)<<48
95
96 r[1] = uint64(x[6])>>3 |
97 uint64(x[7])<<5 |
98 uint64(x[8])<<13 |
99 uint64(x[9])<<21 |
100 uint64(x[10])<<29 |
101 uint64(x[11])<<37 |
102 uint64(x[12]&63)<<45
103
104 r[2] = uint64(x[12])>>6 |
105 uint64(x[13])<<2 |
106 uint64(x[14])<<10 |
107 uint64(x[15])<<18 |
108 uint64(x[16])<<26 |
109 uint64(x[17])<<34 |
110 uint64(x[18])<<42 |
111 uint64(x[19]&1)<<50
112
113 r[3] = uint64(x[19])>>1 |
114 uint64(x[20])<<7 |
115 uint64(x[21])<<15 |
116 uint64(x[22])<<23 |
117 uint64(x[23])<<31 |
118 uint64(x[24])<<39 |
119 uint64(x[25]&15)<<47
120
121 r[4] = uint64(x[25])>>4 |
122 uint64(x[26])<<4 |
123 uint64(x[27])<<12 |
124 uint64(x[28])<<20 |
125 uint64(x[29])<<28 |
126 uint64(x[30])<<36 |
127 uint64(x[31]&127)<<44
128}
129
130// pack sets out = x where out is the usual, little-endian form of the five
131// 51-bit limbs in x.
132func pack(out *[32]byte, x *[5]uint64) {
133 t := *x
134 freeze(&t)
135
136 out[0] = byte(t[0])
137 out[1] = byte(t[0] >> 8)
138 out[2] = byte(t[0] >> 16)
139 out[3] = byte(t[0] >> 24)
140 out[4] = byte(t[0] >> 32)
141 out[5] = byte(t[0] >> 40)
142 out[6] = byte(t[0] >> 48)
143
144 out[6] ^= byte(t[1]<<3) & 0xf8
145 out[7] = byte(t[1] >> 5)
146 out[8] = byte(t[1] >> 13)
147 out[9] = byte(t[1] >> 21)
148 out[10] = byte(t[1] >> 29)
149 out[11] = byte(t[1] >> 37)
150 out[12] = byte(t[1] >> 45)
151
152 out[12] ^= byte(t[2]<<6) & 0xc0
153 out[13] = byte(t[2] >> 2)
154 out[14] = byte(t[2] >> 10)
155 out[15] = byte(t[2] >> 18)
156 out[16] = byte(t[2] >> 26)
157 out[17] = byte(t[2] >> 34)
158 out[18] = byte(t[2] >> 42)
159 out[19] = byte(t[2] >> 50)
160
161 out[19] ^= byte(t[3]<<1) & 0xfe
162 out[20] = byte(t[3] >> 7)
163 out[21] = byte(t[3] >> 15)
164 out[22] = byte(t[3] >> 23)
165 out[23] = byte(t[3] >> 31)
166 out[24] = byte(t[3] >> 39)
167 out[25] = byte(t[3] >> 47)
168
169 out[25] ^= byte(t[4]<<4) & 0xf0
170 out[26] = byte(t[4] >> 4)
171 out[27] = byte(t[4] >> 12)
172 out[28] = byte(t[4] >> 20)
173 out[29] = byte(t[4] >> 28)
174 out[30] = byte(t[4] >> 36)
175 out[31] = byte(t[4] >> 44)
176}
177
178// invert calculates r = x^-1 mod p using Fermat's little theorem.
179func invert(r *[5]uint64, x *[5]uint64) {
180 var z2, z9, z11, z2_5_0, z2_10_0, z2_20_0, z2_50_0, z2_100_0, t [5]uint64
181
182 square(&z2, x) /* 2 */
183 square(&t, &z2) /* 4 */
184 square(&t, &t) /* 8 */
185 mul(&z9, &t, x) /* 9 */
186 mul(&z11, &z9, &z2) /* 11 */
187 square(&t, &z11) /* 22 */
188 mul(&z2_5_0, &t, &z9) /* 2^5 - 2^0 = 31 */
189
190 square(&t, &z2_5_0) /* 2^6 - 2^1 */
191	for i := 1; i < 5; i++ { /* 2^10 - 2^5 */
192 square(&t, &t)
193 }
194 mul(&z2_10_0, &t, &z2_5_0) /* 2^10 - 2^0 */
195
196 square(&t, &z2_10_0) /* 2^11 - 2^1 */
197 for i := 1; i < 10; i++ { /* 2^20 - 2^10 */
198 square(&t, &t)
199 }
200 mul(&z2_20_0, &t, &z2_10_0) /* 2^20 - 2^0 */
201
202 square(&t, &z2_20_0) /* 2^21 - 2^1 */
203 for i := 1; i < 20; i++ { /* 2^40 - 2^20 */
204 square(&t, &t)
205 }
206 mul(&t, &t, &z2_20_0) /* 2^40 - 2^0 */
207
208 square(&t, &t) /* 2^41 - 2^1 */
209 for i := 1; i < 10; i++ { /* 2^50 - 2^10 */
210 square(&t, &t)
211 }
212 mul(&z2_50_0, &t, &z2_10_0) /* 2^50 - 2^0 */
213
214 square(&t, &z2_50_0) /* 2^51 - 2^1 */
215 for i := 1; i < 50; i++ { /* 2^100 - 2^50 */
216 square(&t, &t)
217 }
218 mul(&z2_100_0, &t, &z2_50_0) /* 2^100 - 2^0 */
219
220 square(&t, &z2_100_0) /* 2^101 - 2^1 */
221 for i := 1; i < 100; i++ { /* 2^200 - 2^100 */
222 square(&t, &t)
223 }
224 mul(&t, &t, &z2_100_0) /* 2^200 - 2^0 */
225
226 square(&t, &t) /* 2^201 - 2^1 */
227 for i := 1; i < 50; i++ { /* 2^250 - 2^50 */
228 square(&t, &t)
229 }
230 mul(&t, &t, &z2_50_0) /* 2^250 - 2^0 */
231
232 square(&t, &t) /* 2^251 - 2^1 */
233 square(&t, &t) /* 2^252 - 2^2 */
234 square(&t, &t) /* 2^253 - 2^3 */
235
236 square(&t, &t) /* 2^254 - 2^4 */
237
238 square(&t, &t) /* 2^255 - 2^5 */
239 mul(r, &t, &z11) /* 2^255 - 21 */
240}
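
unpack and pack above convert between the 32-byte little-endian encoding and five 51-bit limbs, so a field element is r[0] + r[1]·2^51 + r[2]·2^102 + r[3]·2^153 + r[4]·2^204. The split can be checked independently with math/big (the helper below is ours; note that math/big's SetBytes is big-endian, unlike unpack's input):

    package main

    import (
    	"fmt"
    	"math/big"
    )

    // limbs splits a non-negative integer into five 51-bit limbs,
    // mirroring the layout unpack produces.
    func limbs(x *big.Int) [5]uint64 {
    	mask := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 51), big.NewInt(1))
    	var r [5]uint64
    	t := new(big.Int).Set(x)
    	for i := 0; i < 5; i++ {
    		r[i] = new(big.Int).And(t, mask).Uint64()
    		t.Rsh(t, 51)
    	}
    	return r
    }

    func main() {
    	x := new(big.Int).SetBytes([]byte{1, 2, 3}) // 0x010203
    	r := limbs(x)

    	// Recombining sum(r[i] << 51i) recovers x.
    	y := new(big.Int)
    	for i := 4; i >= 0; i-- {
    		y.Lsh(y, 51)
    		y.Add(y, new(big.Int).SetUint64(r[i]))
    	}
    	fmt.Println(x.Cmp(y) == 0) // true
    }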
diff --git a/vendor/golang.org/x/crypto/curve25519/mul_amd64.s b/vendor/golang.org/x/crypto/curve25519/mul_amd64.s
new file mode 100644
index 0000000..b162e65
--- /dev/null
+++ b/vendor/golang.org/x/crypto/curve25519/mul_amd64.s
@@ -0,0 +1,169 @@
1// Copyright 2012 The Go Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5// This code was translated into a form compatible with 6a from the public
6// domain sources in SUPERCOP: http://bench.cr.yp.to/supercop.html
7
8// +build amd64,!gccgo,!appengine
9
10#include "const_amd64.h"
11
12// func mul(dest, a, b *[5]uint64)
13TEXT ·mul(SB),0,$16-24
14 MOVQ dest+0(FP), DI
15 MOVQ a+8(FP), SI
16 MOVQ b+16(FP), DX
17
18 MOVQ DX,CX
19 MOVQ 24(SI),DX
20 IMUL3Q $19,DX,AX
21 MOVQ AX,0(SP)
22 MULQ 16(CX)
23 MOVQ AX,R8
24 MOVQ DX,R9
25 MOVQ 32(SI),DX
26 IMUL3Q $19,DX,AX
27 MOVQ AX,8(SP)
28 MULQ 8(CX)
29 ADDQ AX,R8
30 ADCQ DX,R9
31 MOVQ 0(SI),AX
32 MULQ 0(CX)
33 ADDQ AX,R8
34 ADCQ DX,R9
35 MOVQ 0(SI),AX
36 MULQ 8(CX)
37 MOVQ AX,R10
38 MOVQ DX,R11
39 MOVQ 0(SI),AX
40 MULQ 16(CX)
41 MOVQ AX,R12
42 MOVQ DX,R13
43 MOVQ 0(SI),AX
44 MULQ 24(CX)
45 MOVQ AX,R14
46 MOVQ DX,R15
47 MOVQ 0(SI),AX
48 MULQ 32(CX)
49 MOVQ AX,BX
50 MOVQ DX,BP
51 MOVQ 8(SI),AX
52 MULQ 0(CX)
53 ADDQ AX,R10
54 ADCQ DX,R11
55 MOVQ 8(SI),AX
56 MULQ 8(CX)
57 ADDQ AX,R12
58 ADCQ DX,R13
59 MOVQ 8(SI),AX
60 MULQ 16(CX)
61 ADDQ AX,R14
62 ADCQ DX,R15
63 MOVQ 8(SI),AX
64 MULQ 24(CX)
65 ADDQ AX,BX
66 ADCQ DX,BP
67 MOVQ 8(SI),DX
68 IMUL3Q $19,DX,AX
69 MULQ 32(CX)
70 ADDQ AX,R8
71 ADCQ DX,R9
72 MOVQ 16(SI),AX
73 MULQ 0(CX)
74 ADDQ AX,R12
75 ADCQ DX,R13
76 MOVQ 16(SI),AX
77 MULQ 8(CX)
78 ADDQ AX,R14
79 ADCQ DX,R15
80 MOVQ 16(SI),AX
81 MULQ 16(CX)
82 ADDQ AX,BX
83 ADCQ DX,BP
84 MOVQ 16(SI),DX
85 IMUL3Q $19,DX,AX
86 MULQ 24(CX)
87 ADDQ AX,R8
88 ADCQ DX,R9
89 MOVQ 16(SI),DX
90 IMUL3Q $19,DX,AX
91 MULQ 32(CX)
92 ADDQ AX,R10
93 ADCQ DX,R11
94 MOVQ 24(SI),AX
95 MULQ 0(CX)
96 ADDQ AX,R14
97 ADCQ DX,R15
98 MOVQ 24(SI),AX
99 MULQ 8(CX)
100 ADDQ AX,BX
101 ADCQ DX,BP
102 MOVQ 0(SP),AX
103 MULQ 24(CX)
104 ADDQ AX,R10
105 ADCQ DX,R11
106 MOVQ 0(SP),AX
107 MULQ 32(CX)
108 ADDQ AX,R12
109 ADCQ DX,R13
110 MOVQ 32(SI),AX
111 MULQ 0(CX)
112 ADDQ AX,BX
113 ADCQ DX,BP
114 MOVQ 8(SP),AX
115 MULQ 16(CX)
116 ADDQ AX,R10
117 ADCQ DX,R11
118 MOVQ 8(SP),AX
119 MULQ 24(CX)
120 ADDQ AX,R12
121 ADCQ DX,R13
122 MOVQ 8(SP),AX
123 MULQ 32(CX)
124 ADDQ AX,R14
125 ADCQ DX,R15
126 MOVQ $REDMASK51,SI
127 SHLQ $13,R9:R8
128 ANDQ SI,R8
129 SHLQ $13,R11:R10
130 ANDQ SI,R10
131 ADDQ R9,R10
132 SHLQ $13,R13:R12
133 ANDQ SI,R12
134 ADDQ R11,R12
135 SHLQ $13,R15:R14
136 ANDQ SI,R14
137 ADDQ R13,R14
138 SHLQ $13,BP:BX
139 ANDQ SI,BX
140 ADDQ R15,BX
141 IMUL3Q $19,BP,DX
142 ADDQ DX,R8
143 MOVQ R8,DX
144 SHRQ $51,DX
145 ADDQ R10,DX
146 MOVQ DX,CX
147 SHRQ $51,DX
148 ANDQ SI,R8
149 ADDQ R12,DX
150 MOVQ DX,R9
151 SHRQ $51,DX
152 ANDQ SI,CX
153 ADDQ R14,DX
154 MOVQ DX,AX
155 SHRQ $51,DX
156 ANDQ SI,R9
157 ADDQ BX,DX
158 MOVQ DX,R10
159 SHRQ $51,DX
160 ANDQ SI,AX
161 IMUL3Q $19,DX,DX
162 ADDQ DX,R8
163 ANDQ SI,R10
164 MOVQ R8,0(DI)
165 MOVQ CX,8(DI)
166 MOVQ R9,16(DI)
167 MOVQ AX,24(DI)
168 MOVQ R10,32(DI)
169 RET
diff --git a/vendor/golang.org/x/crypto/curve25519/square_amd64.s b/vendor/golang.org/x/crypto/curve25519/square_amd64.s
new file mode 100644
index 0000000..4e864a8
--- /dev/null
+++ b/vendor/golang.org/x/crypto/curve25519/square_amd64.s
@@ -0,0 +1,132 @@
1// Copyright 2012 The Go Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5// This code was translated into a form compatible with 6a from the public
6// domain sources in SUPERCOP: http://bench.cr.yp.to/supercop.html
7
8// +build amd64,!gccgo,!appengine
9
10#include "const_amd64.h"
11
12// func square(out, in *[5]uint64)
13TEXT ·square(SB),7,$0-16
14 MOVQ out+0(FP), DI
15 MOVQ in+8(FP), SI
16
17 MOVQ 0(SI),AX
18 MULQ 0(SI)
19 MOVQ AX,CX
20 MOVQ DX,R8
21 MOVQ 0(SI),AX
22 SHLQ $1,AX
23 MULQ 8(SI)
24 MOVQ AX,R9
25 MOVQ DX,R10
26 MOVQ 0(SI),AX
27 SHLQ $1,AX
28 MULQ 16(SI)
29 MOVQ AX,R11
30 MOVQ DX,R12
31 MOVQ 0(SI),AX
32 SHLQ $1,AX
33 MULQ 24(SI)
34 MOVQ AX,R13
35 MOVQ DX,R14
36 MOVQ 0(SI),AX
37 SHLQ $1,AX
38 MULQ 32(SI)
39 MOVQ AX,R15
40 MOVQ DX,BX
41 MOVQ 8(SI),AX
42 MULQ 8(SI)
43 ADDQ AX,R11
44 ADCQ DX,R12
45 MOVQ 8(SI),AX
46 SHLQ $1,AX
47 MULQ 16(SI)
48 ADDQ AX,R13
49 ADCQ DX,R14
50 MOVQ 8(SI),AX
51 SHLQ $1,AX
52 MULQ 24(SI)
53 ADDQ AX,R15
54 ADCQ DX,BX
55 MOVQ 8(SI),DX
56 IMUL3Q $38,DX,AX
57 MULQ 32(SI)
58 ADDQ AX,CX
59 ADCQ DX,R8
60 MOVQ 16(SI),AX
61 MULQ 16(SI)
62 ADDQ AX,R15
63 ADCQ DX,BX
64 MOVQ 16(SI),DX
65 IMUL3Q $38,DX,AX
66 MULQ 24(SI)
67 ADDQ AX,CX
68 ADCQ DX,R8
69 MOVQ 16(SI),DX
70 IMUL3Q $38,DX,AX
71 MULQ 32(SI)
72 ADDQ AX,R9
73 ADCQ DX,R10
74 MOVQ 24(SI),DX
75 IMUL3Q $19,DX,AX
76 MULQ 24(SI)
77 ADDQ AX,R9
78 ADCQ DX,R10
79 MOVQ 24(SI),DX
80 IMUL3Q $38,DX,AX
81 MULQ 32(SI)
82 ADDQ AX,R11
83 ADCQ DX,R12
84 MOVQ 32(SI),DX
85 IMUL3Q $19,DX,AX
86 MULQ 32(SI)
87 ADDQ AX,R13
88 ADCQ DX,R14
89 MOVQ $REDMASK51,SI
90 SHLQ $13,R8:CX
91 ANDQ SI,CX
92 SHLQ $13,R10:R9
93 ANDQ SI,R9
94 ADDQ R8,R9
95 SHLQ $13,R12:R11
96 ANDQ SI,R11
97 ADDQ R10,R11
98 SHLQ $13,R14:R13
99 ANDQ SI,R13
100 ADDQ R12,R13
101 SHLQ $13,BX:R15
102 ANDQ SI,R15
103 ADDQ R14,R15
104 IMUL3Q $19,BX,DX
105 ADDQ DX,CX
106 MOVQ CX,DX
107 SHRQ $51,DX
108 ADDQ R9,DX
109 ANDQ SI,CX
110 MOVQ DX,R8
111 SHRQ $51,DX
112 ADDQ R11,DX
113 ANDQ SI,R8
114 MOVQ DX,R9
115 SHRQ $51,DX
116 ADDQ R13,DX
117 ANDQ SI,R9
118 MOVQ DX,AX
119 SHRQ $51,DX
120 ADDQ R15,DX
121 ANDQ SI,AX
122 MOVQ DX,R10
123 SHRQ $51,DX
124 IMUL3Q $19,DX,DX
125 ADDQ DX,CX
126 ANDQ SI,R10
127 MOVQ CX,0(DI)
128 MOVQ R8,8(DI)
129 MOVQ R9,16(DI)
130 MOVQ AX,24(DI)
131 MOVQ R10,32(DI)
132 RET
diff --git a/vendor/golang.org/x/crypto/ed25519/ed25519.go b/vendor/golang.org/x/crypto/ed25519/ed25519.go
new file mode 100644
index 0000000..f1d9567
--- /dev/null
+++ b/vendor/golang.org/x/crypto/ed25519/ed25519.go
@@ -0,0 +1,181 @@
1// Copyright 2016 The Go Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5// Package ed25519 implements the Ed25519 signature algorithm. See
6// http://ed25519.cr.yp.to/.
7//
8// These functions are also compatible with the “Ed25519” function defined in
9// https://tools.ietf.org/html/draft-irtf-cfrg-eddsa-05.
10package ed25519
11
12// This code is a port of the public domain, “ref10” implementation of ed25519
13// from SUPERCOP.
14
15import (
16 "crypto"
17 cryptorand "crypto/rand"
18 "crypto/sha512"
19 "crypto/subtle"
20 "errors"
21 "io"
22 "strconv"
23
24 "golang.org/x/crypto/ed25519/internal/edwards25519"
25)
26
27const (
28 // PublicKeySize is the size, in bytes, of public keys as used in this package.
29 PublicKeySize = 32
30 // PrivateKeySize is the size, in bytes, of private keys as used in this package.
31 PrivateKeySize = 64
32 // SignatureSize is the size, in bytes, of signatures generated and verified by this package.
33 SignatureSize = 64
34)
35
36// PublicKey is the type of Ed25519 public keys.
37type PublicKey []byte
38
39// PrivateKey is the type of Ed25519 private keys. It implements crypto.Signer.
40type PrivateKey []byte
41
42// Public returns the PublicKey corresponding to priv.
43func (priv PrivateKey) Public() crypto.PublicKey {
44 publicKey := make([]byte, PublicKeySize)
45 copy(publicKey, priv[32:])
46 return PublicKey(publicKey)
47}
48
49// Sign signs the given message with priv.
50// Ed25519 performs two passes over messages to be signed and therefore cannot
51// handle pre-hashed messages. Thus opts.HashFunc() must return zero to
52// indicate the message hasn't been hashed. This can be achieved by passing
53// crypto.Hash(0) as the value for opts.
54func (priv PrivateKey) Sign(rand io.Reader, message []byte, opts crypto.SignerOpts) (signature []byte, err error) {
55 if opts.HashFunc() != crypto.Hash(0) {
56 return nil, errors.New("ed25519: cannot sign hashed message")
57 }
58
59 return Sign(priv, message), nil
60}
61
62// GenerateKey generates a public/private key pair using entropy from rand.
63// If rand is nil, crypto/rand.Reader will be used.
64func GenerateKey(rand io.Reader) (publicKey PublicKey, privateKey PrivateKey, err error) {
65 if rand == nil {
66 rand = cryptorand.Reader
67 }
68
69 privateKey = make([]byte, PrivateKeySize)
70 publicKey = make([]byte, PublicKeySize)
71 _, err = io.ReadFull(rand, privateKey[:32])
72 if err != nil {
73 return nil, nil, err
74 }
75
76 digest := sha512.Sum512(privateKey[:32])
77 digest[0] &= 248
78 digest[31] &= 127
79 digest[31] |= 64
80
81 var A edwards25519.ExtendedGroupElement
82 var hBytes [32]byte
83 copy(hBytes[:], digest[:])
84 edwards25519.GeScalarMultBase(&A, &hBytes)
85 var publicKeyBytes [32]byte
86 A.ToBytes(&publicKeyBytes)
87
88 copy(privateKey[32:], publicKeyBytes[:])
89 copy(publicKey, publicKeyBytes[:])
90
91 return publicKey, privateKey, nil
92}
93
94// Sign signs the message with privateKey and returns a signature. It will
95// panic if len(privateKey) is not PrivateKeySize.
96func Sign(privateKey PrivateKey, message []byte) []byte {
97 if l := len(privateKey); l != PrivateKeySize {
98 panic("ed25519: bad private key length: " + strconv.Itoa(l))
99 }
100
101 h := sha512.New()
102 h.Write(privateKey[:32])
103
104 var digest1, messageDigest, hramDigest [64]byte
105 var expandedSecretKey [32]byte
106 h.Sum(digest1[:0])
107 copy(expandedSecretKey[:], digest1[:])
108 expandedSecretKey[0] &= 248
109 expandedSecretKey[31] &= 63
110 expandedSecretKey[31] |= 64
111
112 h.Reset()
113 h.Write(digest1[32:])
114 h.Write(message)
115 h.Sum(messageDigest[:0])
116
117 var messageDigestReduced [32]byte
118 edwards25519.ScReduce(&messageDigestReduced, &messageDigest)
119 var R edwards25519.ExtendedGroupElement
120 edwards25519.GeScalarMultBase(&R, &messageDigestReduced)
121
122 var encodedR [32]byte
123 R.ToBytes(&encodedR)
124
125 h.Reset()
126 h.Write(encodedR[:])
127 h.Write(privateKey[32:])
128 h.Write(message)
129 h.Sum(hramDigest[:0])
130 var hramDigestReduced [32]byte
131 edwards25519.ScReduce(&hramDigestReduced, &hramDigest)
132
133 var s [32]byte
134 edwards25519.ScMulAdd(&s, &hramDigestReduced, &expandedSecretKey, &messageDigestReduced)
135
136 signature := make([]byte, SignatureSize)
137 copy(signature[:], encodedR[:])
138 copy(signature[32:], s[:])
139
140 return signature
141}
142
143// Verify reports whether sig is a valid signature of message by publicKey. It
144// will panic if len(publicKey) is not PublicKeySize.
145func Verify(publicKey PublicKey, message, sig []byte) bool {
146 if l := len(publicKey); l != PublicKeySize {
147 panic("ed25519: bad public key length: " + strconv.Itoa(l))
148 }
149
150 if len(sig) != SignatureSize || sig[63]&224 != 0 {
151 return false
152 }
153
154 var A edwards25519.ExtendedGroupElement
155 var publicKeyBytes [32]byte
156 copy(publicKeyBytes[:], publicKey)
157 if !A.FromBytes(&publicKeyBytes) {
158 return false
159 }
160 edwards25519.FeNeg(&A.X, &A.X)
161 edwards25519.FeNeg(&A.T, &A.T)
162
163 h := sha512.New()
164 h.Write(sig[:32])
165 h.Write(publicKey[:])
166 h.Write(message)
167 var digest [64]byte
168 h.Sum(digest[:0])
169
170 var hReduced [32]byte
171 edwards25519.ScReduce(&hReduced, &digest)
172
173 var R edwards25519.ProjectiveGroupElement
174 var b [32]byte
175 copy(b[:], sig[32:])
176 edwards25519.GeDoubleScalarMultVartime(&R, &hReduced, &A, &b)
177
178 var checkR [32]byte
179 R.ToBytes(&checkR)
180 return subtle.ConstantTimeCompare(sig[:32], checkR[:]) == 1
181}
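
The exported surface above (GenerateKey, Sign, and Verify) composes into the usual sign/verify round trip. A minimal usage sketch; the message contents are illustrative:

    package main

    import (
    	"fmt"

    	"golang.org/x/crypto/ed25519"
    )

    func main() {
    	// A nil reader falls back to crypto/rand.Reader, per GenerateKey.
    	pub, priv, err := ed25519.GenerateKey(nil)
    	if err != nil {
    		panic(err)
    	}

    	msg := []byte("attack at dawn")
    	sig := ed25519.Sign(priv, msg) // 64-byte signature

    	fmt.Println(ed25519.Verify(pub, msg, sig))     // true
    	fmt.Println(ed25519.Verify(pub, msg[:4], sig)) // false: message changed
    }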
diff --git a/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/const.go b/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/const.go
new file mode 100644
index 0000000..e39f086
--- /dev/null
+++ b/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/const.go
@@ -0,0 +1,1422 @@
1// Copyright 2016 The Go Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package edwards25519
6
7// These values are from the public domain, “ref10” implementation of ed25519
8// from SUPERCOP.
9
10// d is a constant in the Edwards curve equation.
11var d = FieldElement{
12 -10913610, 13857413, -15372611, 6949391, 114729, -8787816, -6275908, -3247719, -18696448, -12055116,
13}
14
15// d2 is 2*d.
16var d2 = FieldElement{
17 -21827239, -5839606, -30745221, 13898782, 229458, 15978800, -12551817, -6495438, 29715968, 9444199,
18}
19
20// SqrtM1 is the square-root of -1 in the field.
21var SqrtM1 = FieldElement{
22 -32595792, -7943725, 9377950, 3500415, 12389472, -272473, -25146209, -2005654, 326686, 11406482,
23}
24
25// A is a constant in the Montgomery-form of curve25519.
26var A = FieldElement{
27 486662, 0, 0, 0, 0, 0, 0, 0, 0, 0,
28}
29
30// bi contains precomputed multiples of the base-point. See the Ed25519 paper
31// for a discussion about how these values are used.
32var bi = [8]PreComputedGroupElement{
33 {
34 FieldElement{25967493, -14356035, 29566456, 3660896, -12694345, 4014787, 27544626, -11754271, -6079156, 2047605},
35 FieldElement{-12545711, 934262, -2722910, 3049990, -727428, 9406986, 12720692, 5043384, 19500929, -15469378},
36 FieldElement{-8738181, 4489570, 9688441, -14785194, 10184609, -12363380, 29287919, 11864899, -24514362, -4438546},
37 },
38 {
39 FieldElement{15636291, -9688557, 24204773, -7912398, 616977, -16685262, 27787600, -14772189, 28944400, -1550024},
40 FieldElement{16568933, 4717097, -11556148, -1102322, 15682896, -11807043, 16354577, -11775962, 7689662, 11199574},
41 FieldElement{30464156, -5976125, -11779434, -15670865, 23220365, 15915852, 7512774, 10017326, -17749093, -9920357},
42 },
43 {
44 FieldElement{10861363, 11473154, 27284546, 1981175, -30064349, 12577861, 32867885, 14515107, -15438304, 10819380},
45 FieldElement{4708026, 6336745, 20377586, 9066809, -11272109, 6594696, -25653668, 12483688, -12668491, 5581306},
46 FieldElement{19563160, 16186464, -29386857, 4097519, 10237984, -4348115, 28542350, 13850243, -23678021, -15815942},
47 },
48 {
49 FieldElement{5153746, 9909285, 1723747, -2777874, 30523605, 5516873, 19480852, 5230134, -23952439, -15175766},
50 FieldElement{-30269007, -3463509, 7665486, 10083793, 28475525, 1649722, 20654025, 16520125, 30598449, 7715701},
51 FieldElement{28881845, 14381568, 9657904, 3680757, -20181635, 7843316, -31400660, 1370708, 29794553, -1409300},
52 },
53 {
54 FieldElement{-22518993, -6692182, 14201702, -8745502, -23510406, 8844726, 18474211, -1361450, -13062696, 13821877},
55 FieldElement{-6455177, -7839871, 3374702, -4740862, -27098617, -10571707, 31655028, -7212327, 18853322, -14220951},
56 FieldElement{4566830, -12963868, -28974889, -12240689, -7602672, -2830569, -8514358, -10431137, 2207753, -3209784},
57 },
58 {
59 FieldElement{-25154831, -4185821, 29681144, 7868801, -6854661, -9423865, -12437364, -663000, -31111463, -16132436},
60 FieldElement{25576264, -2703214, 7349804, -11814844, 16472782, 9300885, 3844789, 15725684, 171356, 6466918},
61 FieldElement{23103977, 13316479, 9739013, -16149481, 817875, -15038942, 8965339, -14088058, -30714912, 16193877},
62 },
63 {
64 FieldElement{-33521811, 3180713, -2394130, 14003687, -16903474, -16270840, 17238398, 4729455, -18074513, 9256800},
65 FieldElement{-25182317, -4174131, 32336398, 5036987, -21236817, 11360617, 22616405, 9761698, -19827198, 630305},
66 FieldElement{-13720693, 2639453, -24237460, -7406481, 9494427, -5774029, -6554551, -15960994, -2449256, -14291300},
67 },
68 {
69 FieldElement{-3151181, -5046075, 9282714, 6866145, -31907062, -863023, -18940575, 15033784, 25105118, -7894876},
70 FieldElement{-24326370, 15950226, -31801215, -14592823, -11662737, -5090925, 1573892, -2625887, 2198790, -15804619},
71 FieldElement{-3099351, 10324967, -2241613, 7453183, -5446979, -2735503, -13812022, -16236442, -32461234, -12290683},
72 },
73}
74
75// base contains precomputed multiples of the base-point. See the Ed25519 paper
76// for a discussion about how these values are used.
77var base = [32][8]PreComputedGroupElement{
78 {
79 {
80 FieldElement{25967493, -14356035, 29566456, 3660896, -12694345, 4014787, 27544626, -11754271, -6079156, 2047605},
81 FieldElement{-12545711, 934262, -2722910, 3049990, -727428, 9406986, 12720692, 5043384, 19500929, -15469378},
82 FieldElement{-8738181, 4489570, 9688441, -14785194, 10184609, -12363380, 29287919, 11864899, -24514362, -4438546},
83 },
84 {
85 FieldElement{-12815894, -12976347, -21581243, 11784320, -25355658, -2750717, -11717903, -3814571, -358445, -10211303},
86 FieldElement{-21703237, 6903825, 27185491, 6451973, -29577724, -9554005, -15616551, 11189268, -26829678, -5319081},
87 FieldElement{26966642, 11152617, 32442495, 15396054, 14353839, -12752335, -3128826, -9541118, -15472047, -4166697},
88 },
89 {
90 FieldElement{15636291, -9688557, 24204773, -7912398, 616977, -16685262, 27787600, -14772189, 28944400, -1550024},
91 FieldElement{16568933, 4717097, -11556148, -1102322, 15682896, -11807043, 16354577, -11775962, 7689662, 11199574},
92 FieldElement{30464156, -5976125, -11779434, -15670865, 23220365, 15915852, 7512774, 10017326, -17749093, -9920357},
93 },
94 {
95 FieldElement{-17036878, 13921892, 10945806, -6033431, 27105052, -16084379, -28926210, 15006023, 3284568, -6276540},
96 FieldElement{23599295, -8306047, -11193664, -7687416, 13236774, 10506355, 7464579, 9656445, 13059162, 10374397},
97 FieldElement{7798556, 16710257, 3033922, 2874086, 28997861, 2835604, 32406664, -3839045, -641708, -101325},
98 },
99 {
100 FieldElement{10861363, 11473154, 27284546, 1981175, -30064349, 12577861, 32867885, 14515107, -15438304, 10819380},
101 FieldElement{4708026, 6336745, 20377586, 9066809, -11272109, 6594696, -25653668, 12483688, -12668491, 5581306},
102 FieldElement{19563160, 16186464, -29386857, 4097519, 10237984, -4348115, 28542350, 13850243, -23678021, -15815942},
103 },
104 {
105 FieldElement{-15371964, -12862754, 32573250, 4720197, -26436522, 5875511, -19188627, -15224819, -9818940, -12085777},
106 FieldElement{-8549212, 109983, 15149363, 2178705, 22900618, 4543417, 3044240, -15689887, 1762328, 14866737},
107 FieldElement{-18199695, -15951423, -10473290, 1707278, -17185920, 3916101, -28236412, 3959421, 27914454, 4383652},
108 },
109 {
110 FieldElement{5153746, 9909285, 1723747, -2777874, 30523605, 5516873, 19480852, 5230134, -23952439, -15175766},
111 FieldElement{-30269007, -3463509, 7665486, 10083793, 28475525, 1649722, 20654025, 16520125, 30598449, 7715701},
112 FieldElement{28881845, 14381568, 9657904, 3680757, -20181635, 7843316, -31400660, 1370708, 29794553, -1409300},
113 },
114 {
115 FieldElement{14499471, -2729599, -33191113, -4254652, 28494862, 14271267, 30290735, 10876454, -33154098, 2381726},
116 FieldElement{-7195431, -2655363, -14730155, 462251, -27724326, 3941372, -6236617, 3696005, -32300832, 15351955},
117 FieldElement{27431194, 8222322, 16448760, -3907995, -18707002, 11938355, -32961401, -2970515, 29551813, 10109425},
118 },
119 },
120 {
121 {
122 FieldElement{-13657040, -13155431, -31283750, 11777098, 21447386, 6519384, -2378284, -1627556, 10092783, -4764171},
123 FieldElement{27939166, 14210322, 4677035, 16277044, -22964462, -12398139, -32508754, 12005538, -17810127, 12803510},
124 FieldElement{17228999, -15661624, -1233527, 300140, -1224870, -11714777, 30364213, -9038194, 18016357, 4397660},
125 },
126 {
127 FieldElement{-10958843, -7690207, 4776341, -14954238, 27850028, -15602212, -26619106, 14544525, -17477504, 982639},
128 FieldElement{29253598, 15796703, -2863982, -9908884, 10057023, 3163536, 7332899, -4120128, -21047696, 9934963},
129 FieldElement{5793303, 16271923, -24131614, -10116404, 29188560, 1206517, -14747930, 4559895, -30123922, -10897950},
130 },
131 {
132 FieldElement{-27643952, -11493006, 16282657, -11036493, 28414021, -15012264, 24191034, 4541697, -13338309, 5500568},
133 FieldElement{12650548, -1497113, 9052871, 11355358, -17680037, -8400164, -17430592, 12264343, 10874051, 13524335},
134 FieldElement{25556948, -3045990, 714651, 2510400, 23394682, -10415330, 33119038, 5080568, -22528059, 5376628},
135 },
136 {
137 FieldElement{-26088264, -4011052, -17013699, -3537628, -6726793, 1920897, -22321305, -9447443, 4535768, 1569007},
138 FieldElement{-2255422, 14606630, -21692440, -8039818, 28430649, 8775819, -30494562, 3044290, 31848280, 12543772},
139 FieldElement{-22028579, 2943893, -31857513, 6777306, 13784462, -4292203, -27377195, -2062731, 7718482, 14474653},
140 },
141 {
142 FieldElement{2385315, 2454213, -22631320, 46603, -4437935, -15680415, 656965, -7236665, 24316168, -5253567},
143 FieldElement{13741529, 10911568, -33233417, -8603737, -20177830, -1033297, 33040651, -13424532, -20729456, 8321686},
144 FieldElement{21060490, -2212744, 15712757, -4336099, 1639040, 10656336, 23845965, -11874838, -9984458, 608372},
145 },
146 {
147 FieldElement{-13672732, -15087586, -10889693, -7557059, -6036909, 11305547, 1123968, -6780577, 27229399, 23887},
148 FieldElement{-23244140, -294205, -11744728, 14712571, -29465699, -2029617, 12797024, -6440308, -1633405, 16678954},
149 FieldElement{-29500620, 4770662, -16054387, 14001338, 7830047, 9564805, -1508144, -4795045, -17169265, 4904953},
150 },
151 {
152 FieldElement{24059557, 14617003, 19037157, -15039908, 19766093, -14906429, 5169211, 16191880, 2128236, -4326833},
153 FieldElement{-16981152, 4124966, -8540610, -10653797, 30336522, -14105247, -29806336, 916033, -6882542, -2986532},
154 FieldElement{-22630907, 12419372, -7134229, -7473371, -16478904, 16739175, 285431, 2763829, 15736322, 4143876},
155 },
156 {
157 FieldElement{2379352, 11839345, -4110402, -5988665, 11274298, 794957, 212801, -14594663, 23527084, -16458268},
158 FieldElement{33431127, -11130478, -17838966, -15626900, 8909499, 8376530, -32625340, 4087881, -15188911, -14416214},
159 FieldElement{1767683, 7197987, -13205226, -2022635, -13091350, 448826, 5799055, 4357868, -4774191, -16323038},
160 },
161 },
162 {
163 {
164 FieldElement{6721966, 13833823, -23523388, -1551314, 26354293, -11863321, 23365147, -3949732, 7390890, 2759800},
165 FieldElement{4409041, 2052381, 23373853, 10530217, 7676779, -12885954, 21302353, -4264057, 1244380, -12919645},
166 FieldElement{-4421239, 7169619, 4982368, -2957590, 30256825, -2777540, 14086413, 9208236, 15886429, 16489664},
167 },
168 {
169 FieldElement{1996075, 10375649, 14346367, 13311202, -6874135, -16438411, -13693198, 398369, -30606455, -712933},
170 FieldElement{-25307465, 9795880, -2777414, 14878809, -33531835, 14780363, 13348553, 12076947, -30836462, 5113182},
171 FieldElement{-17770784, 11797796, 31950843, 13929123, -25888302, 12288344, -30341101, -7336386, 13847711, 5387222},
172 },
173 {
174 FieldElement{-18582163, -3416217, 17824843, -2340966, 22744343, -10442611, 8763061, 3617786, -19600662, 10370991},
175 FieldElement{20246567, -14369378, 22358229, -543712, 18507283, -10413996, 14554437, -8746092, 32232924, 16763880},
176 FieldElement{9648505, 10094563, 26416693, 14745928, -30374318, -6472621, 11094161, 15689506, 3140038, -16510092},
177 },
178 {
179 FieldElement{-16160072, 5472695, 31895588, 4744994, 8823515, 10365685, -27224800, 9448613, -28774454, 366295},
180 FieldElement{19153450, 11523972, -11096490, -6503142, -24647631, 5420647, 28344573, 8041113, 719605, 11671788},
181 FieldElement{8678025, 2694440, -6808014, 2517372, 4964326, 11152271, -15432916, -15266516, 27000813, -10195553},
182 },
183 {
184 FieldElement{-15157904, 7134312, 8639287, -2814877, -7235688, 10421742, 564065, 5336097, 6750977, -14521026},
185 FieldElement{11836410, -3979488, 26297894, 16080799, 23455045, 15735944, 1695823, -8819122, 8169720, 16220347},
186 FieldElement{-18115838, 8653647, 17578566, -6092619, -8025777, -16012763, -11144307, -2627664, -5990708, -14166033},
187 },
188 {
189 FieldElement{-23308498, -10968312, 15213228, -10081214, -30853605, -11050004, 27884329, 2847284, 2655861, 1738395},
190 FieldElement{-27537433, -14253021, -25336301, -8002780, -9370762, 8129821, 21651608, -3239336, -19087449, -11005278},
191 FieldElement{1533110, 3437855, 23735889, 459276, 29970501, 11335377, 26030092, 5821408, 10478196, 8544890},
192 },
193 {
194 FieldElement{32173121, -16129311, 24896207, 3921497, 22579056, -3410854, 19270449, 12217473, 17789017, -3395995},
195 FieldElement{-30552961, -2228401, -15578829, -10147201, 13243889, 517024, 15479401, -3853233, 30460520, 1052596},
196 FieldElement{-11614875, 13323618, 32618793, 8175907, -15230173, 12596687, 27491595, -4612359, 3179268, -9478891},
197 },
198 {
199 FieldElement{31947069, -14366651, -4640583, -15339921, -15125977, -6039709, -14756777, -16411740, 19072640, -9511060},
200 FieldElement{11685058, 11822410, 3158003, -13952594, 33402194, -4165066, 5977896, -5215017, 473099, 5040608},
201 FieldElement{-20290863, 8198642, -27410132, 11602123, 1290375, -2799760, 28326862, 1721092, -19558642, -3131606},
202 },
203 },
204 {
205 {
206 FieldElement{7881532, 10687937, 7578723, 7738378, -18951012, -2553952, 21820786, 8076149, -27868496, 11538389},
207 FieldElement{-19935666, 3899861, 18283497, -6801568, -15728660, -11249211, 8754525, 7446702, -5676054, 5797016},
208 FieldElement{-11295600, -3793569, -15782110, -7964573, 12708869, -8456199, 2014099, -9050574, -2369172, -5877341},
209 },
210 {
211 FieldElement{-22472376, -11568741, -27682020, 1146375, 18956691, 16640559, 1192730, -3714199, 15123619, 10811505},
212 FieldElement{14352098, -3419715, -18942044, 10822655, 32750596, 4699007, -70363, 15776356, -28886779, -11974553},
213 FieldElement{-28241164, -8072475, -4978962, -5315317, 29416931, 1847569, -20654173, -16484855, 4714547, -9600655},
214 },
215 {
216 FieldElement{15200332, 8368572, 19679101, 15970074, -31872674, 1959451, 24611599, -4543832, -11745876, 12340220},
217 FieldElement{12876937, -10480056, 33134381, 6590940, -6307776, 14872440, 9613953, 8241152, 15370987, 9608631},
218 FieldElement{-4143277, -12014408, 8446281, -391603, 4407738, 13629032, -7724868, 15866074, -28210621, -8814099},
219 },
220 {
221 FieldElement{26660628, -15677655, 8393734, 358047, -7401291, 992988, -23904233, 858697, 20571223, 8420556},
222 FieldElement{14620715, 13067227, -15447274, 8264467, 14106269, 15080814, 33531827, 12516406, -21574435, -12476749},
223 FieldElement{236881, 10476226, 57258, -14677024, 6472998, 2466984, 17258519, 7256740, 8791136, 15069930},
224 },
225 {
226 FieldElement{1276410, -9371918, 22949635, -16322807, -23493039, -5702186, 14711875, 4874229, -30663140, -2331391},
227 FieldElement{5855666, 4990204, -13711848, 7294284, -7804282, 1924647, -1423175, -7912378, -33069337, 9234253},
228 FieldElement{20590503, -9018988, 31529744, -7352666, -2706834, 10650548, 31559055, -11609587, 18979186, 13396066},
229 },
230 {
231 FieldElement{24474287, 4968103, 22267082, 4407354, 24063882, -8325180, -18816887, 13594782, 33514650, 7021958},
232 FieldElement{-11566906, -6565505, -21365085, 15928892, -26158305, 4315421, -25948728, -3916677, -21480480, 12868082},
233 FieldElement{-28635013, 13504661, 19988037, -2132761, 21078225, 6443208, -21446107, 2244500, -12455797, -8089383},
234 },
235 {
236 FieldElement{-30595528, 13793479, -5852820, 319136, -25723172, -6263899, 33086546, 8957937, -15233648, 5540521},
237 FieldElement{-11630176, -11503902, -8119500, -7643073, 2620056, 1022908, -23710744, -1568984, -16128528, -14962807},
238 FieldElement{23152971, 775386, 27395463, 14006635, -9701118, 4649512, 1689819, 892185, -11513277, -15205948},
239 },
240 {
241 FieldElement{9770129, 9586738, 26496094, 4324120, 1556511, -3550024, 27453819, 4763127, -19179614, 5867134},
242 FieldElement{-32765025, 1927590, 31726409, -4753295, 23962434, -16019500, 27846559, 5931263, -29749703, -16108455},
243 FieldElement{27461885, -2977536, 22380810, 1815854, -23033753, -3031938, 7283490, -15148073, -19526700, 7734629},
244 },
245 },
246 {
247 {
248 FieldElement{-8010264, -9590817, -11120403, 6196038, 29344158, -13430885, 7585295, -3176626, 18549497, 15302069},
249 FieldElement{-32658337, -6171222, -7672793, -11051681, 6258878, 13504381, 10458790, -6418461, -8872242, 8424746},
250 FieldElement{24687205, 8613276, -30667046, -3233545, 1863892, -1830544, 19206234, 7134917, -11284482, -828919},
251 },
252 {
253 FieldElement{11334899, -9218022, 8025293, 12707519, 17523892, -10476071, 10243738, -14685461, -5066034, 16498837},
254 FieldElement{8911542, 6887158, -9584260, -6958590, 11145641, -9543680, 17303925, -14124238, 6536641, 10543906},
255 FieldElement{-28946384, 15479763, -17466835, 568876, -1497683, 11223454, -2669190, -16625574, -27235709, 8876771},
256 },
257 {
258 FieldElement{-25742899, -12566864, -15649966, -846607, -33026686, -796288, -33481822, 15824474, -604426, -9039817},
259 FieldElement{10330056, 70051, 7957388, -9002667, 9764902, 15609756, 27698697, -4890037, 1657394, 3084098},
260 FieldElement{10477963, -7470260, 12119566, -13250805, 29016247, -5365589, 31280319, 14396151, -30233575, 15272409},
261 },
262 {
263 FieldElement{-12288309, 3169463, 28813183, 16658753, 25116432, -5630466, -25173957, -12636138, -25014757, 1950504},
264 FieldElement{-26180358, 9489187, 11053416, -14746161, -31053720, 5825630, -8384306, -8767532, 15341279, 8373727},
265 FieldElement{28685821, 7759505, -14378516, -12002860, -31971820, 4079242, 298136, -10232602, -2878207, 15190420},
266 },
267 {
268 FieldElement{-32932876, 13806336, -14337485, -15794431, -24004620, 10940928, 8669718, 2742393, -26033313, -6875003},
269 FieldElement{-1580388, -11729417, -25979658, -11445023, -17411874, -10912854, 9291594, -16247779, -12154742, 6048605},
270 FieldElement{-30305315, 14843444, 1539301, 11864366, 20201677, 1900163, 13934231, 5128323, 11213262, 9168384},
271 },
272 {
273 FieldElement{-26280513, 11007847, 19408960, -940758, -18592965, -4328580, -5088060, -11105150, 20470157, -16398701},
274 FieldElement{-23136053, 9282192, 14855179, -15390078, -7362815, -14408560, -22783952, 14461608, 14042978, 5230683},
275 FieldElement{29969567, -2741594, -16711867, -8552442, 9175486, -2468974, 21556951, 3506042, -5933891, -12449708},
276 },
277 {
278 FieldElement{-3144746, 8744661, 19704003, 4581278, -20430686, 6830683, -21284170, 8971513, -28539189, 15326563},
279 FieldElement{-19464629, 10110288, -17262528, -3503892, -23500387, 1355669, -15523050, 15300988, -20514118, 9168260},
280 FieldElement{-5353335, 4488613, -23803248, 16314347, 7780487, -15638939, -28948358, 9601605, 33087103, -9011387},
281 },
282 {
283 FieldElement{-19443170, -15512900, -20797467, -12445323, -29824447, 10229461, -27444329, -15000531, -5996870, 15664672},
284 FieldElement{23294591, -16632613, -22650781, -8470978, 27844204, 11461195, 13099750, -2460356, 18151676, 13417686},
285 FieldElement{-24722913, -4176517, -31150679, 5988919, -26858785, 6685065, 1661597, -12551441, 15271676, -15452665},
286 },
287 },
288 {
289 {
290 FieldElement{11433042, -13228665, 8239631, -5279517, -1985436, -725718, -18698764, 2167544, -6921301, -13440182},
291 FieldElement{-31436171, 15575146, 30436815, 12192228, -22463353, 9395379, -9917708, -8638997, 12215110, 12028277},
292 FieldElement{14098400, 6555944, 23007258, 5757252, -15427832, -12950502, 30123440, 4617780, -16900089, -655628},
293 },
294 {
295 FieldElement{-4026201, -15240835, 11893168, 13718664, -14809462, 1847385, -15819999, 10154009, 23973261, -12684474},
296 FieldElement{-26531820, -3695990, -1908898, 2534301, -31870557, -16550355, 18341390, -11419951, 32013174, -10103539},
297 FieldElement{-25479301, 10876443, -11771086, -14625140, -12369567, 1838104, 21911214, 6354752, 4425632, -837822},
298 },
299 {
300 FieldElement{-10433389, -14612966, 22229858, -3091047, -13191166, 776729, -17415375, -12020462, 4725005, 14044970},
301 FieldElement{19268650, -7304421, 1555349, 8692754, -21474059, -9910664, 6347390, -1411784, -19522291, -16109756},
302 FieldElement{-24864089, 12986008, -10898878, -5558584, -11312371, -148526, 19541418, 8180106, 9282262, 10282508},
303 },
304 {
305 FieldElement{-26205082, 4428547, -8661196, -13194263, 4098402, -14165257, 15522535, 8372215, 5542595, -10702683},
306 FieldElement{-10562541, 14895633, 26814552, -16673850, -17480754, -2489360, -2781891, 6993761, -18093885, 10114655},
307 FieldElement{-20107055, -929418, 31422704, 10427861, -7110749, 6150669, -29091755, -11529146, 25953725, -106158},
308 },
309 {
310 FieldElement{-4234397, -8039292, -9119125, 3046000, 2101609, -12607294, 19390020, 6094296, -3315279, 12831125},
311 FieldElement{-15998678, 7578152, 5310217, 14408357, -33548620, -224739, 31575954, 6326196, 7381791, -2421839},
312 FieldElement{-20902779, 3296811, 24736065, -16328389, 18374254, 7318640, 6295303, 8082724, -15362489, 12339664},
313 },
314 {
315 FieldElement{27724736, 2291157, 6088201, -14184798, 1792727, 5857634, 13848414, 15768922, 25091167, 14856294},
316 FieldElement{-18866652, 8331043, 24373479, 8541013, -701998, -9269457, 12927300, -12695493, -22182473, -9012899},
317 FieldElement{-11423429, -5421590, 11632845, 3405020, 30536730, -11674039, -27260765, 13866390, 30146206, 9142070},
318 },
319 {
320 FieldElement{3924129, -15307516, -13817122, -10054960, 12291820, -668366, -27702774, 9326384, -8237858, 4171294},
321 FieldElement{-15921940, 16037937, 6713787, 16606682, -21612135, 2790944, 26396185, 3731949, 345228, -5462949},
322 FieldElement{-21327538, 13448259, 25284571, 1143661, 20614966, -8849387, 2031539, -12391231, -16253183, -13582083},
323 },
324 {
325 FieldElement{31016211, -16722429, 26371392, -14451233, -5027349, 14854137, 17477601, 3842657, 28012650, -16405420},
326 FieldElement{-5075835, 9368966, -8562079, -4600902, -15249953, 6970560, -9189873, 16292057, -8867157, 3507940},
327 FieldElement{29439664, 3537914, 23333589, 6997794, -17555561, -11018068, -15209202, -15051267, -9164929, 6580396},
328 },
329 },
330 {
331 {
332 FieldElement{-12185861, -7679788, 16438269, 10826160, -8696817, -6235611, 17860444, -9273846, -2095802, 9304567},
333 FieldElement{20714564, -4336911, 29088195, 7406487, 11426967, -5095705, 14792667, -14608617, 5289421, -477127},
334 FieldElement{-16665533, -10650790, -6160345, -13305760, 9192020, -1802462, 17271490, 12349094, 26939669, -3752294},
335 },
336 {
337 FieldElement{-12889898, 9373458, 31595848, 16374215, 21471720, 13221525, -27283495, -12348559, -3698806, 117887},
338 FieldElement{22263325, -6560050, 3984570, -11174646, -15114008, -566785, 28311253, 5358056, -23319780, 541964},
339 FieldElement{16259219, 3261970, 2309254, -15534474, -16885711, -4581916, 24134070, -16705829, -13337066, -13552195},
340 },
341 {
342 FieldElement{9378160, -13140186, -22845982, -12745264, 28198281, -7244098, -2399684, -717351, 690426, 14876244},
343 FieldElement{24977353, -314384, -8223969, -13465086, 28432343, -1176353, -13068804, -12297348, -22380984, 6618999},
344 FieldElement{-1538174, 11685646, 12944378, 13682314, -24389511, -14413193, 8044829, -13817328, 32239829, -5652762},
345 },
346 {
347 FieldElement{-18603066, 4762990, -926250, 8885304, -28412480, -3187315, 9781647, -10350059, 32779359, 5095274},
348 FieldElement{-33008130, -5214506, -32264887, -3685216, 9460461, -9327423, -24601656, 14506724, 21639561, -2630236},
349 FieldElement{-16400943, -13112215, 25239338, 15531969, 3987758, -4499318, -1289502, -6863535, 17874574, 558605},
350 },
351 {
352 FieldElement{-13600129, 10240081, 9171883, 16131053, -20869254, 9599700, 33499487, 5080151, 2085892, 5119761},
353 FieldElement{-22205145, -2519528, -16381601, 414691, -25019550, 2170430, 30634760, -8363614, -31999993, -5759884},
354 FieldElement{-6845704, 15791202, 8550074, -1312654, 29928809, -12092256, 27534430, -7192145, -22351378, 12961482},
355 },
356 {
357 FieldElement{-24492060, -9570771, 10368194, 11582341, -23397293, -2245287, 16533930, 8206996, -30194652, -5159638},
358 FieldElement{-11121496, -3382234, 2307366, 6362031, -135455, 8868177, -16835630, 7031275, 7589640, 8945490},
359 FieldElement{-32152748, 8917967, 6661220, -11677616, -1192060, -15793393, 7251489, -11182180, 24099109, -14456170},
360 },
361 {
362 FieldElement{5019558, -7907470, 4244127, -14714356, -26933272, 6453165, -19118182, -13289025, -6231896, -10280736},
363 FieldElement{10853594, 10721687, 26480089, 5861829, -22995819, 1972175, -1866647, -10557898, -3363451, -6441124},
364 FieldElement{-17002408, 5906790, 221599, -6563147, 7828208, -13248918, 24362661, -2008168, -13866408, 7421392},
365 },
366 {
367 FieldElement{8139927, -6546497, 32257646, -5890546, 30375719, 1886181, -21175108, 15441252, 28826358, -4123029},
368 FieldElement{6267086, 9695052, 7709135, -16603597, -32869068, -1886135, 14795160, -7840124, 13746021, -1742048},
369 FieldElement{28584902, 7787108, -6732942, -15050729, 22846041, -7571236, -3181936, -363524, 4771362, -8419958},
370 },
371 },
372 {
373 {
374 FieldElement{24949256, 6376279, -27466481, -8174608, -18646154, -9930606, 33543569, -12141695, 3569627, 11342593},
375 FieldElement{26514989, 4740088, 27912651, 3697550, 19331575, -11472339, 6809886, 4608608, 7325975, -14801071},
376 FieldElement{-11618399, -14554430, -24321212, 7655128, -1369274, 5214312, -27400540, 10258390, -17646694, -8186692},
377 },
378 {
379 FieldElement{11431204, 15823007, 26570245, 14329124, 18029990, 4796082, -31446179, 15580664, 9280358, -3973687},
380 FieldElement{-160783, -10326257, -22855316, -4304997, -20861367, -13621002, -32810901, -11181622, -15545091, 4387441},
381 FieldElement{-20799378, 12194512, 3937617, -5805892, -27154820, 9340370, -24513992, 8548137, 20617071, -7482001},
382 },
383 {
384 FieldElement{-938825, -3930586, -8714311, 16124718, 24603125, -6225393, -13775352, -11875822, 24345683, 10325460},
385 FieldElement{-19855277, -1568885, -22202708, 8714034, 14007766, 6928528, 16318175, -1010689, 4766743, 3552007},
386 FieldElement{-21751364, -16730916, 1351763, -803421, -4009670, 3950935, 3217514, 14481909, 10988822, -3994762},
387 },
388 {
389 FieldElement{15564307, -14311570, 3101243, 5684148, 30446780, -8051356, 12677127, -6505343, -8295852, 13296005},
390 FieldElement{-9442290, 6624296, -30298964, -11913677, -4670981, -2057379, 31521204, 9614054, -30000824, 12074674},
391 FieldElement{4771191, -135239, 14290749, -13089852, 27992298, 14998318, -1413936, -1556716, 29832613, -16391035},
392 },
393 {
394 FieldElement{7064884, -7541174, -19161962, -5067537, -18891269, -2912736, 25825242, 5293297, -27122660, 13101590},
395 FieldElement{-2298563, 2439670, -7466610, 1719965, -27267541, -16328445, 32512469, -5317593, -30356070, -4190957},
396 FieldElement{-30006540, 10162316, -33180176, 3981723, -16482138, -13070044, 14413974, 9515896, 19568978, 9628812},
397 },
398 {
399 FieldElement{33053803, 199357, 15894591, 1583059, 27380243, -4580435, -17838894, -6106839, -6291786, 3437740},
400 FieldElement{-18978877, 3884493, 19469877, 12726490, 15913552, 13614290, -22961733, 70104, 7463304, 4176122},
401 FieldElement{-27124001, 10659917, 11482427, -16070381, 12771467, -6635117, -32719404, -5322751, 24216882, 5944158},
402 },
403 {
404 FieldElement{8894125, 7450974, -2664149, -9765752, -28080517, -12389115, 19345746, 14680796, 11632993, 5847885},
405 FieldElement{26942781, -2315317, 9129564, -4906607, 26024105, 11769399, -11518837, 6367194, -9727230, 4782140},
406 FieldElement{19916461, -4828410, -22910704, -11414391, 25606324, -5972441, 33253853, 8220911, 6358847, -1873857},
407 },
408 {
409 FieldElement{801428, -2081702, 16569428, 11065167, 29875704, 96627, 7908388, -4480480, -13538503, 1387155},
410 FieldElement{19646058, 5720633, -11416706, 12814209, 11607948, 12749789, 14147075, 15156355, -21866831, 11835260},
411 FieldElement{19299512, 1155910, 28703737, 14890794, 2925026, 7269399, 26121523, 15467869, -26560550, 5052483},
412 },
413 },
414 {
415 {
416 FieldElement{-3017432, 10058206, 1980837, 3964243, 22160966, 12322533, -6431123, -12618185, 12228557, -7003677},
417 FieldElement{32944382, 14922211, -22844894, 5188528, 21913450, -8719943, 4001465, 13238564, -6114803, 8653815},
418 FieldElement{22865569, -4652735, 27603668, -12545395, 14348958, 8234005, 24808405, 5719875, 28483275, 2841751},
419 },
420 {
421 FieldElement{-16420968, -1113305, -327719, -12107856, 21886282, -15552774, -1887966, -315658, 19932058, -12739203},
422 FieldElement{-11656086, 10087521, -8864888, -5536143, -19278573, -3055912, 3999228, 13239134, -4777469, -13910208},
423 FieldElement{1382174, -11694719, 17266790, 9194690, -13324356, 9720081, 20403944, 11284705, -14013818, 3093230},
424 },
425 {
426 FieldElement{16650921, -11037932, -1064178, 1570629, -8329746, 7352753, -302424, 16271225, -24049421, -6691850},
427 FieldElement{-21911077, -5927941, -4611316, -5560156, -31744103, -10785293, 24123614, 15193618, -21652117, -16739389},
428 FieldElement{-9935934, -4289447, -25279823, 4372842, 2087473, 10399484, 31870908, 14690798, 17361620, 11864968},
429 },
430 {
431 FieldElement{-11307610, 6210372, 13206574, 5806320, -29017692, -13967200, -12331205, -7486601, -25578460, -16240689},
432 FieldElement{14668462, -12270235, 26039039, 15305210, 25515617, 4542480, 10453892, 6577524, 9145645, -6443880},
433 FieldElement{5974874, 3053895, -9433049, -10385191, -31865124, 3225009, -7972642, 3936128, -5652273, -3050304},
434 },
435 {
436 FieldElement{30625386, -4729400, -25555961, -12792866, -20484575, 7695099, 17097188, -16303496, -27999779, 1803632},
437 FieldElement{-3553091, 9865099, -5228566, 4272701, -5673832, -16689700, 14911344, 12196514, -21405489, 7047412},
438 FieldElement{20093277, 9920966, -11138194, -5343857, 13161587, 12044805, -32856851, 4124601, -32343828, -10257566},
439 },
440 {
441 FieldElement{-20788824, 14084654, -13531713, 7842147, 19119038, -13822605, 4752377, -8714640, -21679658, 2288038},
442 FieldElement{-26819236, -3283715, 29965059, 3039786, -14473765, 2540457, 29457502, 14625692, -24819617, 12570232},
443 FieldElement{-1063558, -11551823, 16920318, 12494842, 1278292, -5869109, -21159943, -3498680, -11974704, 4724943},
444 },
445 {
446 FieldElement{17960970, -11775534, -4140968, -9702530, -8876562, -1410617, -12907383, -8659932, -29576300, 1903856},
447 FieldElement{23134274, -14279132, -10681997, -1611936, 20684485, 15770816, -12989750, 3190296, 26955097, 14109738},
448 FieldElement{15308788, 5320727, -30113809, -14318877, 22902008, 7767164, 29425325, -11277562, 31960942, 11934971},
449 },
450 {
451 FieldElement{-27395711, 8435796, 4109644, 12222639, -24627868, 14818669, 20638173, 4875028, 10491392, 1379718},
452 FieldElement{-13159415, 9197841, 3875503, -8936108, -1383712, -5879801, 33518459, 16176658, 21432314, 12180697},
453 FieldElement{-11787308, 11500838, 13787581, -13832590, -22430679, 10140205, 1465425, 12689540, -10301319, -13872883},
454 },
455 },
456 {
457 {
458 FieldElement{5414091, -15386041, -21007664, 9643570, 12834970, 1186149, -2622916, -1342231, 26128231, 6032912},
459 FieldElement{-26337395, -13766162, 32496025, -13653919, 17847801, -12669156, 3604025, 8316894, -25875034, -10437358},
460 FieldElement{3296484, 6223048, 24680646, -12246460, -23052020, 5903205, -8862297, -4639164, 12376617, 3188849},
461 },
462 {
463 FieldElement{29190488, -14659046, 27549113, -1183516, 3520066, -10697301, 32049515, -7309113, -16109234, -9852307},
464 FieldElement{-14744486, -9309156, 735818, -598978, -20407687, -5057904, 25246078, -15795669, 18640741, -960977},
465 FieldElement{-6928835, -16430795, 10361374, 5642961, 4910474, 12345252, -31638386, -494430, 10530747, 1053335},
466 },
467 {
468 FieldElement{-29265967, -14186805, -13538216, -12117373, -19457059, -10655384, -31462369, -2948985, 24018831, 15026644},
469 FieldElement{-22592535, -3145277, -2289276, 5953843, -13440189, 9425631, 25310643, 13003497, -2314791, -15145616},
470 FieldElement{-27419985, -603321, -8043984, -1669117, -26092265, 13987819, -27297622, 187899, -23166419, -2531735},
471 },
472 {
473 FieldElement{-21744398, -13810475, 1844840, 5021428, -10434399, -15911473, 9716667, 16266922, -5070217, 726099},
474 FieldElement{29370922, -6053998, 7334071, -15342259, 9385287, 2247707, -13661962, -4839461, 30007388, -15823341},
475 FieldElement{-936379, 16086691, 23751945, -543318, -1167538, -5189036, 9137109, 730663, 9835848, 4555336},
476 },
477 {
478 FieldElement{-23376435, 1410446, -22253753, -12899614, 30867635, 15826977, 17693930, 544696, -11985298, 12422646},
479 FieldElement{31117226, -12215734, -13502838, 6561947, -9876867, -12757670, -5118685, -4096706, 29120153, 13924425},
480 FieldElement{-17400879, -14233209, 19675799, -2734756, -11006962, -5858820, -9383939, -11317700, 7240931, -237388},
481 },
482 {
483 FieldElement{-31361739, -11346780, -15007447, -5856218, -22453340, -12152771, 1222336, 4389483, 3293637, -15551743},
484 FieldElement{-16684801, -14444245, 11038544, 11054958, -13801175, -3338533, -24319580, 7733547, 12796905, -6335822},
485 FieldElement{-8759414, -10817836, -25418864, 10783769, -30615557, -9746811, -28253339, 3647836, 3222231, -11160462},
486 },
487 {
488 FieldElement{18606113, 1693100, -25448386, -15170272, 4112353, 10045021, 23603893, -2048234, -7550776, 2484985},
489 FieldElement{9255317, -3131197, -12156162, -1004256, 13098013, -9214866, 16377220, -2102812, -19802075, -3034702},
490 FieldElement{-22729289, 7496160, -5742199, 11329249, 19991973, -3347502, -31718148, 9936966, -30097688, -10618797},
491 },
492 {
493 FieldElement{21878590, -5001297, 4338336, 13643897, -3036865, 13160960, 19708896, 5415497, -7360503, -4109293},
494 FieldElement{27736861, 10103576, 12500508, 8502413, -3413016, -9633558, 10436918, -1550276, -23659143, -8132100},
495 FieldElement{19492550, -12104365, -29681976, -852630, -3208171, 12403437, 30066266, 8367329, 13243957, 8709688},
496 },
497 },
498 {
499 {
500 FieldElement{12015105, 2801261, 28198131, 10151021, 24818120, -4743133, -11194191, -5645734, 5150968, 7274186},
501 FieldElement{2831366, -12492146, 1478975, 6122054, 23825128, -12733586, 31097299, 6083058, 31021603, -9793610},
502 FieldElement{-2529932, -2229646, 445613, 10720828, -13849527, -11505937, -23507731, 16354465, 15067285, -14147707},
503 },
504 {
505 FieldElement{7840942, 14037873, -33364863, 15934016, -728213, -3642706, 21403988, 1057586, -19379462, -12403220},
506 FieldElement{915865, -16469274, 15608285, -8789130, -24357026, 6060030, -17371319, 8410997, -7220461, 16527025},
507 FieldElement{32922597, -556987, 20336074, -16184568, 10903705, -5384487, 16957574, 52992, 23834301, 6588044},
508 },
509 {
510 FieldElement{32752030, 11232950, 3381995, -8714866, 22652988, -10744103, 17159699, 16689107, -20314580, -1305992},
511 FieldElement{-4689649, 9166776, -25710296, -10847306, 11576752, 12733943, 7924251, -2752281, 1976123, -7249027},
512 FieldElement{21251222, 16309901, -2983015, -6783122, 30810597, 12967303, 156041, -3371252, 12331345, -8237197},
513 },
514 {
515 FieldElement{8651614, -4477032, -16085636, -4996994, 13002507, 2950805, 29054427, -5106970, 10008136, -4667901},
516 FieldElement{31486080, 15114593, -14261250, 12951354, 14369431, -7387845, 16347321, -13662089, 8684155, -10532952},
517 FieldElement{19443825, 11385320, 24468943, -9659068, -23919258, 2187569, -26263207, -6086921, 31316348, 14219878},
518 },
519 {
520 FieldElement{-28594490, 1193785, 32245219, 11392485, 31092169, 15722801, 27146014, 6992409, 29126555, 9207390},
521 FieldElement{32382935, 1110093, 18477781, 11028262, -27411763, -7548111, -4980517, 10843782, -7957600, -14435730},
522 FieldElement{2814918, 7836403, 27519878, -7868156, -20894015, -11553689, -21494559, 8550130, 28346258, 1994730},
523 },
524 {
525 FieldElement{-19578299, 8085545, -14000519, -3948622, 2785838, -16231307, -19516951, 7174894, 22628102, 8115180},
526 FieldElement{-30405132, 955511, -11133838, -15078069, -32447087, -13278079, -25651578, 3317160, -9943017, 930272},
527 FieldElement{-15303681, -6833769, 28856490, 1357446, 23421993, 1057177, 24091212, -1388970, -22765376, -10650715},
528 },
529 {
530 FieldElement{-22751231, -5303997, -12907607, -12768866, -15811511, -7797053, -14839018, -16554220, -1867018, 8398970},
531 FieldElement{-31969310, 2106403, -4736360, 1362501, 12813763, 16200670, 22981545, -6291273, 18009408, -15772772},
532 FieldElement{-17220923, -9545221, -27784654, 14166835, 29815394, 7444469, 29551787, -3727419, 19288549, 1325865},
533 },
534 {
535 FieldElement{15100157, -15835752, -23923978, -1005098, -26450192, 15509408, 12376730, -3479146, 33166107, -8042750},
536 FieldElement{20909231, 13023121, -9209752, 16251778, -5778415, -8094914, 12412151, 10018715, 2213263, -13878373},
537 FieldElement{32529814, -11074689, 30361439, -16689753, -9135940, 1513226, 22922121, 6382134, -5766928, 8371348},
538 },
539 },
540 {
541 {
542 FieldElement{9923462, 11271500, 12616794, 3544722, -29998368, -1721626, 12891687, -8193132, -26442943, 10486144},
543 FieldElement{-22597207, -7012665, 8587003, -8257861, 4084309, -12970062, 361726, 2610596, -23921530, -11455195},
544 FieldElement{5408411, -1136691, -4969122, 10561668, 24145918, 14240566, 31319731, -4235541, 19985175, -3436086},
545 },
546 {
547 FieldElement{-13994457, 16616821, 14549246, 3341099, 32155958, 13648976, -17577068, 8849297, 65030, 8370684},
548 FieldElement{-8320926, -12049626, 31204563, 5839400, -20627288, -1057277, -19442942, 6922164, 12743482, -9800518},
549 FieldElement{-2361371, 12678785, 28815050, 4759974, -23893047, 4884717, 23783145, 11038569, 18800704, 255233},
550 },
551 {
552 FieldElement{-5269658, -1773886, 13957886, 7990715, 23132995, 728773, 13393847, 9066957, 19258688, -14753793},
553 FieldElement{-2936654, -10827535, -10432089, 14516793, -3640786, 4372541, -31934921, 2209390, -1524053, 2055794},
554 FieldElement{580882, 16705327, 5468415, -2683018, -30926419, -14696000, -7203346, -8994389, -30021019, 7394435},
555 },
556 {
557 FieldElement{23838809, 1822728, -15738443, 15242727, 8318092, -3733104, -21672180, -3492205, -4821741, 14799921},
558 FieldElement{13345610, 9759151, 3371034, -16137791, 16353039, 8577942, 31129804, 13496856, -9056018, 7402518},
559 FieldElement{2286874, -4435931, -20042458, -2008336, -13696227, 5038122, 11006906, -15760352, 8205061, 1607563},
560 },
561 {
562 FieldElement{14414086, -8002132, 3331830, -3208217, 22249151, -5594188, 18364661, -2906958, 30019587, -9029278},
563 FieldElement{-27688051, 1585953, -10775053, 931069, -29120221, -11002319, -14410829, 12029093, 9944378, 8024},
564 FieldElement{4368715, -3709630, 29874200, -15022983, -20230386, -11410704, -16114594, -999085, -8142388, 5640030},
565 },
566 {
567 FieldElement{10299610, 13746483, 11661824, 16234854, 7630238, 5998374, 9809887, -16694564, 15219798, -14327783},
568 FieldElement{27425505, -5719081, 3055006, 10660664, 23458024, 595578, -15398605, -1173195, -18342183, 9742717},
569 FieldElement{6744077, 2427284, 26042789, 2720740, -847906, 1118974, 32324614, 7406442, 12420155, 1994844},
570 },
571 {
572 FieldElement{14012521, -5024720, -18384453, -9578469, -26485342, -3936439, -13033478, -10909803, 24319929, -6446333},
573 FieldElement{16412690, -4507367, 10772641, 15929391, -17068788, -4658621, 10555945, -10484049, -30102368, -4739048},
574 FieldElement{22397382, -7767684, -9293161, -12792868, 17166287, -9755136, -27333065, 6199366, 21880021, -12250760},
575 },
576 {
577 FieldElement{-4283307, 5368523, -31117018, 8163389, -30323063, 3209128, 16557151, 8890729, 8840445, 4957760},
578 FieldElement{-15447727, 709327, -6919446, -10870178, -29777922, 6522332, -21720181, 12130072, -14796503, 5005757},
579 FieldElement{-2114751, -14308128, 23019042, 15765735, -25269683, 6002752, 10183197, -13239326, -16395286, -2176112},
580 },
581 },
582 {
583 {
584 FieldElement{-19025756, 1632005, 13466291, -7995100, -23640451, 16573537, -32013908, -3057104, 22208662, 2000468},
585 FieldElement{3065073, -1412761, -25598674, -361432, -17683065, -5703415, -8164212, 11248527, -3691214, -7414184},
586 FieldElement{10379208, -6045554, 8877319, 1473647, -29291284, -12507580, 16690915, 2553332, -3132688, 16400289},
587 },
588 {
589 FieldElement{15716668, 1254266, -18472690, 7446274, -8448918, 6344164, -22097271, -7285580, 26894937, 9132066},
590 FieldElement{24158887, 12938817, 11085297, -8177598, -28063478, -4457083, -30576463, 64452, -6817084, -2692882},
591 FieldElement{13488534, 7794716, 22236231, 5989356, 25426474, -12578208, 2350710, -3418511, -4688006, 2364226},
592 },
593 {
594 FieldElement{16335052, 9132434, 25640582, 6678888, 1725628, 8517937, -11807024, -11697457, 15445875, -7798101},
595 FieldElement{29004207, -7867081, 28661402, -640412, -12794003, -7943086, 31863255, -4135540, -278050, -15759279},
596 FieldElement{-6122061, -14866665, -28614905, 14569919, -10857999, -3591829, 10343412, -6976290, -29828287, -10815811},
597 },
598 {
599 FieldElement{27081650, 3463984, 14099042, -4517604, 1616303, -6205604, 29542636, 15372179, 17293797, 960709},
600 FieldElement{20263915, 11434237, -5765435, 11236810, 13505955, -10857102, -16111345, 6493122, -19384511, 7639714},
601 FieldElement{-2830798, -14839232, 25403038, -8215196, -8317012, -16173699, 18006287, -16043750, 29994677, -15808121},
602 },
603 {
604 FieldElement{9769828, 5202651, -24157398, -13631392, -28051003, -11561624, -24613141, -13860782, -31184575, 709464},
605 FieldElement{12286395, 13076066, -21775189, -1176622, -25003198, 4057652, -32018128, -8890874, 16102007, 13205847},
606 FieldElement{13733362, 5599946, 10557076, 3195751, -5557991, 8536970, -25540170, 8525972, 10151379, 10394400},
607 },
608 {
609 FieldElement{4024660, -16137551, 22436262, 12276534, -9099015, -2686099, 19698229, 11743039, -33302334, 8934414},
610 FieldElement{-15879800, -4525240, -8580747, -2934061, 14634845, -698278, -9449077, 3137094, -11536886, 11721158},
611 FieldElement{17555939, -5013938, 8268606, 2331751, -22738815, 9761013, 9319229, 8835153, -9205489, -1280045},
612 },
613 {
614 FieldElement{-461409, -7830014, 20614118, 16688288, -7514766, -4807119, 22300304, 505429, 6108462, -6183415},
615 FieldElement{-5070281, 12367917, -30663534, 3234473, 32617080, -8422642, 29880583, -13483331, -26898490, -7867459},
616 FieldElement{-31975283, 5726539, 26934134, 10237677, -3173717, -605053, 24199304, 3795095, 7592688, -14992079},
617 },
618 {
619 FieldElement{21594432, -14964228, 17466408, -4077222, 32537084, 2739898, 6407723, 12018833, -28256052, 4298412},
620 FieldElement{-20650503, -11961496, -27236275, 570498, 3767144, -1717540, 13891942, -1569194, 13717174, 10805743},
621 FieldElement{-14676630, -15644296, 15287174, 11927123, 24177847, -8175568, -796431, 14860609, -26938930, -5863836},
622 },
623 },
624 {
625 {
626 FieldElement{12962541, 5311799, -10060768, 11658280, 18855286, -7954201, 13286263, -12808704, -4381056, 9882022},
627 FieldElement{18512079, 11319350, -20123124, 15090309, 18818594, 5271736, -22727904, 3666879, -23967430, -3299429},
628 FieldElement{-6789020, -3146043, 16192429, 13241070, 15898607, -14206114, -10084880, -6661110, -2403099, 5276065},
629 },
630 {
631 FieldElement{30169808, -5317648, 26306206, -11750859, 27814964, 7069267, 7152851, 3684982, 1449224, 13082861},
632 FieldElement{10342826, 3098505, 2119311, 193222, 25702612, 12233820, 23697382, 15056736, -21016438, -8202000},
633 FieldElement{-33150110, 3261608, 22745853, 7948688, 19370557, -15177665, -26171976, 6482814, -10300080, -11060101},
634 },
635 {
636 FieldElement{32869458, -5408545, 25609743, 15678670, -10687769, -15471071, 26112421, 2521008, -22664288, 6904815},
637 FieldElement{29506923, 4457497, 3377935, -9796444, -30510046, 12935080, 1561737, 3841096, -29003639, -6657642},
638 FieldElement{10340844, -6630377, -18656632, -2278430, 12621151, -13339055, 30878497, -11824370, -25584551, 5181966},
639 },
640 {
641 FieldElement{25940115, -12658025, 17324188, -10307374, -8671468, 15029094, 24396252, -16450922, -2322852, -12388574},
642 FieldElement{-21765684, 9916823, -1300409, 4079498, -1028346, 11909559, 1782390, 12641087, 20603771, -6561742},
643 FieldElement{-18882287, -11673380, 24849422, 11501709, 13161720, -4768874, 1925523, 11914390, 4662781, 7820689},
644 },
645 {
646 FieldElement{12241050, -425982, 8132691, 9393934, 32846760, -1599620, 29749456, 12172924, 16136752, 15264020},
647 FieldElement{-10349955, -14680563, -8211979, 2330220, -17662549, -14545780, 10658213, 6671822, 19012087, 3772772},
648 FieldElement{3753511, -3421066, 10617074, 2028709, 14841030, -6721664, 28718732, -15762884, 20527771, 12988982},
649 },
650 {
651 FieldElement{-14822485, -5797269, -3707987, 12689773, -898983, -10914866, -24183046, -10564943, 3299665, -12424953},
652 FieldElement{-16777703, -15253301, -9642417, 4978983, 3308785, 8755439, 6943197, 6461331, -25583147, 8991218},
653 FieldElement{-17226263, 1816362, -1673288, -6086439, 31783888, -8175991, -32948145, 7417950, -30242287, 1507265},
654 },
655 {
656 FieldElement{29692663, 6829891, -10498800, 4334896, 20945975, -11906496, -28887608, 8209391, 14606362, -10647073},
657 FieldElement{-3481570, 8707081, 32188102, 5672294, 22096700, 1711240, -33020695, 9761487, 4170404, -2085325},
658 FieldElement{-11587470, 14855945, -4127778, -1531857, -26649089, 15084046, 22186522, 16002000, -14276837, -8400798},
659 },
660 {
661 FieldElement{-4811456, 13761029, -31703877, -2483919, -3312471, 7869047, -7113572, -9620092, 13240845, 10965870},
662 FieldElement{-7742563, -8256762, -14768334, -13656260, -23232383, 12387166, 4498947, 14147411, 29514390, 4302863},
663 FieldElement{-13413405, -12407859, 20757302, -13801832, 14785143, 8976368, -5061276, -2144373, 17846988, -13971927},
664 },
665 },
666 {
667 {
668 FieldElement{-2244452, -754728, -4597030, -1066309, -6247172, 1455299, -21647728, -9214789, -5222701, 12650267},
669 FieldElement{-9906797, -16070310, 21134160, 12198166, -27064575, 708126, 387813, 13770293, -19134326, 10958663},
670 FieldElement{22470984, 12369526, 23446014, -5441109, -21520802, -9698723, -11772496, -11574455, -25083830, 4271862},
671 },
672 {
673 FieldElement{-25169565, -10053642, -19909332, 15361595, -5984358, 2159192, 75375, -4278529, -32526221, 8469673},
674 FieldElement{15854970, 4148314, -8893890, 7259002, 11666551, 13824734, -30531198, 2697372, 24154791, -9460943},
675 FieldElement{15446137, -15806644, 29759747, 14019369, 30811221, -9610191, -31582008, 12840104, 24913809, 9815020},
676 },
677 {
678 FieldElement{-4709286, -5614269, -31841498, -12288893, -14443537, 10799414, -9103676, 13438769, 18735128, 9466238},
679 FieldElement{11933045, 9281483, 5081055, -5183824, -2628162, -4905629, -7727821, -10896103, -22728655, 16199064},
680 FieldElement{14576810, 379472, -26786533, -8317236, -29426508, -10812974, -102766, 1876699, 30801119, 2164795},
681 },
682 {
683 FieldElement{15995086, 3199873, 13672555, 13712240, -19378835, -4647646, -13081610, -15496269, -13492807, 1268052},
684 FieldElement{-10290614, -3659039, -3286592, 10948818, 23037027, 3794475, -3470338, -12600221, -17055369, 3565904},
685 FieldElement{29210088, -9419337, -5919792, -4952785, 10834811, -13327726, -16512102, -10820713, -27162222, -14030531},
686 },
687 {
688 FieldElement{-13161890, 15508588, 16663704, -8156150, -28349942, 9019123, -29183421, -3769423, 2244111, -14001979},
689 FieldElement{-5152875, -3800936, -9306475, -6071583, 16243069, 14684434, -25673088, -16180800, 13491506, 4641841},
690 FieldElement{10813417, 643330, -19188515, -728916, 30292062, -16600078, 27548447, -7721242, 14476989, -12767431},
691 },
692 {
693 FieldElement{10292079, 9984945, 6481436, 8279905, -7251514, 7032743, 27282937, -1644259, -27912810, 12651324},
694 FieldElement{-31185513, -813383, 22271204, 11835308, 10201545, 15351028, 17099662, 3988035, 21721536, -3148940},
695 FieldElement{10202177, -6545839, -31373232, -9574638, -32150642, -8119683, -12906320, 3852694, 13216206, 14842320},
696 },
697 {
698 FieldElement{-15815640, -10601066, -6538952, -7258995, -6984659, -6581778, -31500847, 13765824, -27434397, 9900184},
699 FieldElement{14465505, -13833331, -32133984, -14738873, -27443187, 12990492, 33046193, 15796406, -7051866, -8040114},
700 FieldElement{30924417, -8279620, 6359016, -12816335, 16508377, 9071735, -25488601, 15413635, 9524356, -7018878},
701 },
702 {
703 FieldElement{12274201, -13175547, 32627641, -1785326, 6736625, 13267305, 5237659, -5109483, 15663516, 4035784},
704 FieldElement{-2951309, 8903985, 17349946, 601635, -16432815, -4612556, -13732739, -15889334, -22258478, 4659091},
705 FieldElement{-16916263, -4952973, -30393711, -15158821, 20774812, 15897498, 5736189, 15026997, -2178256, -13455585},
706 },
707 },
708 {
709 {
710 FieldElement{-8858980, -2219056, 28571666, -10155518, -474467, -10105698, -3801496, 278095, 23440562, -290208},
711 FieldElement{10226241, -5928702, 15139956, 120818, -14867693, 5218603, 32937275, 11551483, -16571960, -7442864},
712 FieldElement{17932739, -12437276, -24039557, 10749060, 11316803, 7535897, 22503767, 5561594, -3646624, 3898661},
713 },
714 {
715 FieldElement{7749907, -969567, -16339731, -16464, -25018111, 15122143, -1573531, 7152530, 21831162, 1245233},
716 FieldElement{26958459, -14658026, 4314586, 8346991, -5677764, 11960072, -32589295, -620035, -30402091, -16716212},
717 FieldElement{-12165896, 9166947, 33491384, 13673479, 29787085, 13096535, 6280834, 14587357, -22338025, 13987525},
718 },
719 {
720 FieldElement{-24349909, 7778775, 21116000, 15572597, -4833266, -5357778, -4300898, -5124639, -7469781, -2858068},
721 FieldElement{9681908, -6737123, -31951644, 13591838, -6883821, 386950, 31622781, 6439245, -14581012, 4091397},
722 FieldElement{-8426427, 1470727, -28109679, -1596990, 3978627, -5123623, -19622683, 12092163, 29077877, -14741988},
723 },
724 {
725 FieldElement{5269168, -6859726, -13230211, -8020715, 25932563, 1763552, -5606110, -5505881, -20017847, 2357889},
726 FieldElement{32264008, -15407652, -5387735, -1160093, -2091322, -3946900, 23104804, -12869908, 5727338, 189038},
727 FieldElement{14609123, -8954470, -6000566, -16622781, -14577387, -7743898, -26745169, 10942115, -25888931, -14884697},
728 },
729 {
730 FieldElement{20513500, 5557931, -15604613, 7829531, 26413943, -2019404, -21378968, 7471781, 13913677, -5137875},
731 FieldElement{-25574376, 11967826, 29233242, 12948236, -6754465, 4713227, -8940970, 14059180, 12878652, 8511905},
732 FieldElement{-25656801, 3393631, -2955415, -7075526, -2250709, 9366908, -30223418, 6812974, 5568676, -3127656},
733 },
734 {
735 FieldElement{11630004, 12144454, 2116339, 13606037, 27378885, 15676917, -17408753, -13504373, -14395196, 8070818},
736 FieldElement{27117696, -10007378, -31282771, -5570088, 1127282, 12772488, -29845906, 10483306, -11552749, -1028714},
737 FieldElement{10637467, -5688064, 5674781, 1072708, -26343588, -6982302, -1683975, 9177853, -27493162, 15431203},
738 },
739 {
740 FieldElement{20525145, 10892566, -12742472, 12779443, -29493034, 16150075, -28240519, 14943142, -15056790, -7935931},
741 FieldElement{-30024462, 5626926, -551567, -9981087, 753598, 11981191, 25244767, -3239766, -3356550, 9594024},
742 FieldElement{-23752644, 2636870, -5163910, -10103818, 585134, 7877383, 11345683, -6492290, 13352335, -10977084},
743 },
744 {
745 FieldElement{-1931799, -5407458, 3304649, -12884869, 17015806, -4877091, -29783850, -7752482, -13215537, -319204},
746 FieldElement{20239939, 6607058, 6203985, 3483793, -18386976, -779229, -20723742, 15077870, -22750759, 14523817},
747 FieldElement{27406042, -6041657, 27423596, -4497394, 4996214, 10002360, -28842031, -4545494, -30172742, -4805667},
748 },
749 },
750 {
751 {
752 FieldElement{11374242, 12660715, 17861383, -12540833, 10935568, 1099227, -13886076, -9091740, -27727044, 11358504},
753 FieldElement{-12730809, 10311867, 1510375, 10778093, -2119455, -9145702, 32676003, 11149336, -26123651, 4985768},
754 FieldElement{-19096303, 341147, -6197485, -239033, 15756973, -8796662, -983043, 13794114, -19414307, -15621255},
755 },
756 {
757 FieldElement{6490081, 11940286, 25495923, -7726360, 8668373, -8751316, 3367603, 6970005, -1691065, -9004790},
758 FieldElement{1656497, 13457317, 15370807, 6364910, 13605745, 8362338, -19174622, -5475723, -16796596, -5031438},
759 FieldElement{-22273315, -13524424, -64685, -4334223, -18605636, -10921968, -20571065, -7007978, -99853, -10237333},
760 },
761 {
762 FieldElement{17747465, 10039260, 19368299, -4050591, -20630635, -16041286, 31992683, -15857976, -29260363, -5511971},
763 FieldElement{31932027, -4986141, -19612382, 16366580, 22023614, 88450, 11371999, -3744247, 4882242, -10626905},
764 FieldElement{29796507, 37186, 19818052, 10115756, -11829032, 3352736, 18551198, 3272828, -5190932, -4162409},
765 },
766 {
767 FieldElement{12501286, 4044383, -8612957, -13392385, -32430052, 5136599, -19230378, -3529697, 330070, -3659409},
768 FieldElement{6384877, 2899513, 17807477, 7663917, -2358888, 12363165, 25366522, -8573892, -271295, 12071499},
769 FieldElement{-8365515, -4042521, 25133448, -4517355, -6211027, 2265927, -32769618, 1936675, -5159697, 3829363},
770 },
771 {
772 FieldElement{28425966, -5835433, -577090, -4697198, -14217555, 6870930, 7921550, -6567787, 26333140, 14267664},
773 FieldElement{-11067219, 11871231, 27385719, -10559544, -4585914, -11189312, 10004786, -8709488, -21761224, 8930324},
774 FieldElement{-21197785, -16396035, 25654216, -1725397, 12282012, 11008919, 1541940, 4757911, -26491501, -16408940},
775 },
776 {
777 FieldElement{13537262, -7759490, -20604840, 10961927, -5922820, -13218065, -13156584, 6217254, -15943699, 13814990},
778 FieldElement{-17422573, 15157790, 18705543, 29619, 24409717, -260476, 27361681, 9257833, -1956526, -1776914},
779 FieldElement{-25045300, -10191966, 15366585, 15166509, -13105086, 8423556, -29171540, 12361135, -18685978, 4578290},
780 },
781 {
782 FieldElement{24579768, 3711570, 1342322, -11180126, -27005135, 14124956, -22544529, 14074919, 21964432, 8235257},
783 FieldElement{-6528613, -2411497, 9442966, -5925588, 12025640, -1487420, -2981514, -1669206, 13006806, 2355433},
784 FieldElement{-16304899, -13605259, -6632427, -5142349, 16974359, -10911083, 27202044, 1719366, 1141648, -12796236},
785 },
786 {
787 FieldElement{-12863944, -13219986, -8318266, -11018091, -6810145, -4843894, 13475066, -3133972, 32674895, 13715045},
788 FieldElement{11423335, -5468059, 32344216, 8962751, 24989809, 9241752, -13265253, 16086212, -28740881, -15642093},
789 FieldElement{-1409668, 12530728, -6368726, 10847387, 19531186, -14132160, -11709148, 7791794, -27245943, 4383347},
790 },
791 },
792 {
793 {
794 FieldElement{-28970898, 5271447, -1266009, -9736989, -12455236, 16732599, -4862407, -4906449, 27193557, 6245191},
795 FieldElement{-15193956, 5362278, -1783893, 2695834, 4960227, 12840725, 23061898, 3260492, 22510453, 8577507},
796 FieldElement{-12632451, 11257346, -32692994, 13548177, -721004, 10879011, 31168030, 13952092, -29571492, -3635906},
797 },
798 {
799 FieldElement{3877321, -9572739, 32416692, 5405324, -11004407, -13656635, 3759769, 11935320, 5611860, 8164018},
800 FieldElement{-16275802, 14667797, 15906460, 12155291, -22111149, -9039718, 32003002, -8832289, 5773085, -8422109},
801 FieldElement{-23788118, -8254300, 1950875, 8937633, 18686727, 16459170, -905725, 12376320, 31632953, 190926},
802 },
803 {
804 FieldElement{-24593607, -16138885, -8423991, 13378746, 14162407, 6901328, -8288749, 4508564, -25341555, -3627528},
805 FieldElement{8884438, -5884009, 6023974, 10104341, -6881569, -4941533, 18722941, -14786005, -1672488, 827625},
806 FieldElement{-32720583, -16289296, -32503547, 7101210, 13354605, 2659080, -1800575, -14108036, -24878478, 1541286},
807 },
808 {
809 FieldElement{2901347, -1117687, 3880376, -10059388, -17620940, -3612781, -21802117, -3567481, 20456845, -1885033},
810 FieldElement{27019610, 12299467, -13658288, -1603234, -12861660, -4861471, -19540150, -5016058, 29439641, 15138866},
811 FieldElement{21536104, -6626420, -32447818, -10690208, -22408077, 5175814, -5420040, -16361163, 7779328, 109896},
812 },
813 {
814 FieldElement{30279744, 14648750, -8044871, 6425558, 13639621, -743509, 28698390, 12180118, 23177719, -554075},
815 FieldElement{26572847, 3405927, -31701700, 12890905, -19265668, 5335866, -6493768, 2378492, 4439158, -13279347},
816 FieldElement{-22716706, 3489070, -9225266, -332753, 18875722, -1140095, 14819434, -12731527, -17717757, -5461437},
817 },
818 {
819 FieldElement{-5056483, 16566551, 15953661, 3767752, -10436499, 15627060, -820954, 2177225, 8550082, -15114165},
820 FieldElement{-18473302, 16596775, -381660, 15663611, 22860960, 15585581, -27844109, -3582739, -23260460, -8428588},
821 FieldElement{-32480551, 15707275, -8205912, -5652081, 29464558, 2713815, -22725137, 15860482, -21902570, 1494193},
822 },
823 {
824 FieldElement{-19562091, -14087393, -25583872, -9299552, 13127842, 759709, 21923482, 16529112, 8742704, 12967017},
825 FieldElement{-28464899, 1553205, 32536856, -10473729, -24691605, -406174, -8914625, -2933896, -29903758, 15553883},
826 FieldElement{21877909, 3230008, 9881174, 10539357, -4797115, 2841332, 11543572, 14513274, 19375923, -12647961},
827 },
828 {
829 FieldElement{8832269, -14495485, 13253511, 5137575, 5037871, 4078777, 24880818, -6222716, 2862653, 9455043},
830 FieldElement{29306751, 5123106, 20245049, -14149889, 9592566, 8447059, -2077124, -2990080, 15511449, 4789663},
831 FieldElement{-20679756, 7004547, 8824831, -9434977, -4045704, -3750736, -5754762, 108893, 23513200, 16652362},
832 },
833 },
834 {
835 {
836 FieldElement{-33256173, 4144782, -4476029, -6579123, 10770039, -7155542, -6650416, -12936300, -18319198, 10212860},
837 FieldElement{2756081, 8598110, 7383731, -6859892, 22312759, -1105012, 21179801, 2600940, -9988298, -12506466},
838 FieldElement{-24645692, 13317462, -30449259, -15653928, 21365574, -10869657, 11344424, 864440, -2499677, -16710063},
839 },
840 {
841 FieldElement{-26432803, 6148329, -17184412, -14474154, 18782929, -275997, -22561534, 211300, 2719757, 4940997},
842 FieldElement{-1323882, 3911313, -6948744, 14759765, -30027150, 7851207, 21690126, 8518463, 26699843, 5276295},
843 FieldElement{-13149873, -6429067, 9396249, 365013, 24703301, -10488939, 1321586, 149635, -15452774, 7159369},
844 },
845 {
846 FieldElement{9987780, -3404759, 17507962, 9505530, 9731535, -2165514, 22356009, 8312176, 22477218, -8403385},
847 FieldElement{18155857, -16504990, 19744716, 9006923, 15154154, -10538976, 24256460, -4864995, -22548173, 9334109},
848 FieldElement{2986088, -4911893, 10776628, -3473844, 10620590, -7083203, -21413845, 14253545, -22587149, 536906},
849 },
850 {
851 FieldElement{4377756, 8115836, 24567078, 15495314, 11625074, 13064599, 7390551, 10589625, 10838060, -15420424},
852 FieldElement{-19342404, 867880, 9277171, -3218459, -14431572, -1986443, 19295826, -15796950, 6378260, 699185},
853 FieldElement{7895026, 4057113, -7081772, -13077756, -17886831, -323126, -716039, 15693155, -5045064, -13373962},
854 },
855 {
856 FieldElement{-7737563, -5869402, -14566319, -7406919, 11385654, 13201616, 31730678, -10962840, -3918636, -9669325},
857 FieldElement{10188286, -15770834, -7336361, 13427543, 22223443, 14896287, 30743455, 7116568, -21786507, 5427593},
858 FieldElement{696102, 13206899, 27047647, -10632082, 15285305, -9853179, 10798490, -4578720, 19236243, 12477404},
859 },
860 {
861 FieldElement{-11229439, 11243796, -17054270, -8040865, -788228, -8167967, -3897669, 11180504, -23169516, 7733644},
862 FieldElement{17800790, -14036179, -27000429, -11766671, 23887827, 3149671, 23466177, -10538171, 10322027, 15313801},
863 FieldElement{26246234, 11968874, 32263343, -5468728, 6830755, -13323031, -15794704, -101982, -24449242, 10890804},
864 },
865 {
866 FieldElement{-31365647, 10271363, -12660625, -6267268, 16690207, -13062544, -14982212, 16484931, 25180797, -5334884},
867 FieldElement{-586574, 10376444, -32586414, -11286356, 19801893, 10997610, 2276632, 9482883, 316878, 13820577},
868 FieldElement{-9882808, -4510367, -2115506, 16457136, -11100081, 11674996, 30756178, -7515054, 30696930, -3712849},
869 },
870 {
871 FieldElement{32988917, -9603412, 12499366, 7910787, -10617257, -11931514, -7342816, -9985397, -32349517, 7392473},
872 FieldElement{-8855661, 15927861, 9866406, -3649411, -2396914, -16655781, -30409476, -9134995, 25112947, -2926644},
873 FieldElement{-2504044, -436966, 25621774, -5678772, 15085042, -5479877, -24884878, -13526194, 5537438, -13914319},
874 },
875 },
876 {
877 {
878 FieldElement{-11225584, 2320285, -9584280, 10149187, -33444663, 5808648, -14876251, -1729667, 31234590, 6090599},
879 FieldElement{-9633316, 116426, 26083934, 2897444, -6364437, -2688086, 609721, 15878753, -6970405, -9034768},
880 FieldElement{-27757857, 247744, -15194774, -9002551, 23288161, -10011936, -23869595, 6503646, 20650474, 1804084},
881 },
882 {
883 FieldElement{-27589786, 15456424, 8972517, 8469608, 15640622, 4439847, 3121995, -10329713, 27842616, -202328},
884 FieldElement{-15306973, 2839644, 22530074, 10026331, 4602058, 5048462, 28248656, 5031932, -11375082, 12714369},
885 FieldElement{20807691, -7270825, 29286141, 11421711, -27876523, -13868230, -21227475, 1035546, -19733229, 12796920},
886 },
887 {
888 FieldElement{12076899, -14301286, -8785001, -11848922, -25012791, 16400684, -17591495, -12899438, 3480665, -15182815},
889 FieldElement{-32361549, 5457597, 28548107, 7833186, 7303070, -11953545, -24363064, -15921875, -33374054, 2771025},
890 FieldElement{-21389266, 421932, 26597266, 6860826, 22486084, -6737172, -17137485, -4210226, -24552282, 15673397},
891 },
892 {
893 FieldElement{-20184622, 2338216, 19788685, -9620956, -4001265, -8740893, -20271184, 4733254, 3727144, -12934448},
894 FieldElement{6120119, 814863, -11794402, -622716, 6812205, -15747771, 2019594, 7975683, 31123697, -10958981},
895 FieldElement{30069250, -11435332, 30434654, 2958439, 18399564, -976289, 12296869, 9204260, -16432438, 9648165},
896 },
897 {
898 FieldElement{32705432, -1550977, 30705658, 7451065, -11805606, 9631813, 3305266, 5248604, -26008332, -11377501},
899 FieldElement{17219865, 2375039, -31570947, -5575615, -19459679, 9219903, 294711, 15298639, 2662509, -16297073},
900 FieldElement{-1172927, -7558695, -4366770, -4287744, -21346413, -8434326, 32087529, -1222777, 32247248, -14389861},
901 },
902 {
903 FieldElement{14312628, 1221556, 17395390, -8700143, -4945741, -8684635, -28197744, -9637817, -16027623, -13378845},
904 FieldElement{-1428825, -9678990, -9235681, 6549687, -7383069, -468664, 23046502, 9803137, 17597934, 2346211},
905 FieldElement{18510800, 15337574, 26171504, 981392, -22241552, 7827556, -23491134, -11323352, 3059833, -11782870},
906 },
907 {
908 FieldElement{10141598, 6082907, 17829293, -1947643, 9830092, 13613136, -25556636, -5544586, -33502212, 3592096},
909 FieldElement{33114168, -15889352, -26525686, -13343397, 33076705, 8716171, 1151462, 1521897, -982665, -6837803},
910 FieldElement{-32939165, -4255815, 23947181, -324178, -33072974, -12305637, -16637686, 3891704, 26353178, 693168},
911 },
912 {
913 FieldElement{30374239, 1595580, -16884039, 13186931, 4600344, 406904, 9585294, -400668, 31375464, 14369965},
914 FieldElement{-14370654, -7772529, 1510301, 6434173, -18784789, -6262728, 32732230, -13108839, 17901441, 16011505},
915 FieldElement{18171223, -11934626, -12500402, 15197122, -11038147, -15230035, -19172240, -16046376, 8764035, 12309598},
916 },
917 },
918 {
919 {
920 FieldElement{5975908, -5243188, -19459362, -9681747, -11541277, 14015782, -23665757, 1228319, 17544096, -10593782},
921 FieldElement{5811932, -1715293, 3442887, -2269310, -18367348, -8359541, -18044043, -15410127, -5565381, 12348900},
922 FieldElement{-31399660, 11407555, 25755363, 6891399, -3256938, 14872274, -24849353, 8141295, -10632534, -585479},
923 },
924 {
925 FieldElement{-12675304, 694026, -5076145, 13300344, 14015258, -14451394, -9698672, -11329050, 30944593, 1130208},
926 FieldElement{8247766, -6710942, -26562381, -7709309, -14401939, -14648910, 4652152, 2488540, 23550156, -271232},
927 FieldElement{17294316, -3788438, 7026748, 15626851, 22990044, 113481, 2267737, -5908146, -408818, -137719},
928 },
929 {
930 FieldElement{16091085, -16253926, 18599252, 7340678, 2137637, -1221657, -3364161, 14550936, 3260525, -7166271},
931 FieldElement{-4910104, -13332887, 18550887, 10864893, -16459325, -7291596, -23028869, -13204905, -12748722, 2701326},
932 FieldElement{-8574695, 16099415, 4629974, -16340524, -20786213, -6005432, -10018363, 9276971, 11329923, 1862132},
933 },
934 {
935 FieldElement{14763076, -15903608, -30918270, 3689867, 3511892, 10313526, -21951088, 12219231, -9037963, -940300},
936 FieldElement{8894987, -3446094, 6150753, 3013931, 301220, 15693451, -31981216, -2909717, -15438168, 11595570},
937 FieldElement{15214962, 3537601, -26238722, -14058872, 4418657, -15230761, 13947276, 10730794, -13489462, -4363670},
938 },
939 {
940 FieldElement{-2538306, 7682793, 32759013, 263109, -29984731, -7955452, -22332124, -10188635, 977108, 699994},
941 FieldElement{-12466472, 4195084, -9211532, 550904, -15565337, 12917920, 19118110, -439841, -30534533, -14337913},
942 FieldElement{31788461, -14507657, 4799989, 7372237, 8808585, -14747943, 9408237, -10051775, 12493932, -5409317},
943 },
944 {
945 FieldElement{-25680606, 5260744, -19235809, -6284470, -3695942, 16566087, 27218280, 2607121, 29375955, 6024730},
946 FieldElement{842132, -2794693, -4763381, -8722815, 26332018, -12405641, 11831880, 6985184, -9940361, 2854096},
947 FieldElement{-4847262, -7969331, 2516242, -5847713, 9695691, -7221186, 16512645, 960770, 12121869, 16648078},
948 },
949 {
950 FieldElement{-15218652, 14667096, -13336229, 2013717, 30598287, -464137, -31504922, -7882064, 20237806, 2838411},
951 FieldElement{-19288047, 4453152, 15298546, -16178388, 22115043, -15972604, 12544294, -13470457, 1068881, -12499905},
952 FieldElement{-9558883, -16518835, 33238498, 13506958, 30505848, -1114596, -8486907, -2630053, 12521378, 4845654},
953 },
954 {
955 FieldElement{-28198521, 10744108, -2958380, 10199664, 7759311, -13088600, 3409348, -873400, -6482306, -12885870},
956 FieldElement{-23561822, 6230156, -20382013, 10655314, -24040585, -11621172, 10477734, -1240216, -3113227, 13974498},
957 FieldElement{12966261, 15550616, -32038948, -1615346, 21025980, -629444, 5642325, 7188737, 18895762, 12629579},
958 },
959 },
960 {
961 {
962 FieldElement{14741879, -14946887, 22177208, -11721237, 1279741, 8058600, 11758140, 789443, 32195181, 3895677},
963 FieldElement{10758205, 15755439, -4509950, 9243698, -4879422, 6879879, -2204575, -3566119, -8982069, 4429647},
964 FieldElement{-2453894, 15725973, -20436342, -10410672, -5803908, -11040220, -7135870, -11642895, 18047436, -15281743},
965 },
966 {
967 FieldElement{-25173001, -11307165, 29759956, 11776784, -22262383, -15820455, 10993114, -12850837, -17620701, -9408468},
968 FieldElement{21987233, 700364, -24505048, 14972008, -7774265, -5718395, 32155026, 2581431, -29958985, 8773375},
969 FieldElement{-25568350, 454463, -13211935, 16126715, 25240068, 8594567, 20656846, 12017935, -7874389, -13920155},
970 },
971 {
972 FieldElement{6028182, 6263078, -31011806, -11301710, -818919, 2461772, -31841174, -5468042, -1721788, -2776725},
973 FieldElement{-12278994, 16624277, 987579, -5922598, 32908203, 1248608, 7719845, -4166698, 28408820, 6816612},
974 FieldElement{-10358094, -8237829, 19549651, -12169222, 22082623, 16147817, 20613181, 13982702, -10339570, 5067943},
975 },
976 {
977 FieldElement{-30505967, -3821767, 12074681, 13582412, -19877972, 2443951, -19719286, 12746132, 5331210, -10105944},
978 FieldElement{30528811, 3601899, -1957090, 4619785, -27361822, -15436388, 24180793, -12570394, 27679908, -1648928},
979 FieldElement{9402404, -13957065, 32834043, 10838634, -26580150, -13237195, 26653274, -8685565, 22611444, -12715406},
980 },
981 {
982 FieldElement{22190590, 1118029, 22736441, 15130463, -30460692, -5991321, 19189625, -4648942, 4854859, 6622139},
983 FieldElement{-8310738, -2953450, -8262579, -3388049, -10401731, -271929, 13424426, -3567227, 26404409, 13001963},
984 FieldElement{-31241838, -15415700, -2994250, 8939346, 11562230, -12840670, -26064365, -11621720, -15405155, 11020693},
985 },
986 {
987 FieldElement{1866042, -7949489, -7898649, -10301010, 12483315, 13477547, 3175636, -12424163, 28761762, 1406734},
988 FieldElement{-448555, -1777666, 13018551, 3194501, -9580420, -11161737, 24760585, -4347088, 25577411, -13378680},
989 FieldElement{-24290378, 4759345, -690653, -1852816, 2066747, 10693769, -29595790, 9884936, -9368926, 4745410},
990 },
991 {
992 FieldElement{-9141284, 6049714, -19531061, -4341411, -31260798, 9944276, -15462008, -11311852, 10931924, -11931931},
993 FieldElement{-16561513, 14112680, -8012645, 4817318, -8040464, -11414606, -22853429, 10856641, -20470770, 13434654},
994 FieldElement{22759489, -10073434, -16766264, -1871422, 13637442, -10168091, 1765144, -12654326, 28445307, -5364710},
995 },
996 {
997 FieldElement{29875063, 12493613, 2795536, -3786330, 1710620, 15181182, -10195717, -8788675, 9074234, 1167180},
998 FieldElement{-26205683, 11014233, -9842651, -2635485, -26908120, 7532294, -18716888, -9535498, 3843903, 9367684},
999 FieldElement{-10969595, -6403711, 9591134, 9582310, 11349256, 108879, 16235123, 8601684, -139197, 4242895},
1000 },
1001 },
1002 {
1003 {
1004 FieldElement{22092954, -13191123, -2042793, -11968512, 32186753, -11517388, -6574341, 2470660, -27417366, 16625501},
1005 FieldElement{-11057722, 3042016, 13770083, -9257922, 584236, -544855, -7770857, 2602725, -27351616, 14247413},
1006 FieldElement{6314175, -10264892, -32772502, 15957557, -10157730, 168750, -8618807, 14290061, 27108877, -1180880},
1007 },
1008 {
1009 FieldElement{-8586597, -7170966, 13241782, 10960156, -32991015, -13794596, 33547976, -11058889, -27148451, 981874},
1010 FieldElement{22833440, 9293594, -32649448, -13618667, -9136966, 14756819, -22928859, -13970780, -10479804, -16197962},
1011 FieldElement{-7768587, 3326786, -28111797, 10783824, 19178761, 14905060, 22680049, 13906969, -15933690, 3797899},
1012 },
1013 {
1014 FieldElement{21721356, -4212746, -12206123, 9310182, -3882239, -13653110, 23740224, -2709232, 20491983, -8042152},
1015 FieldElement{9209270, -15135055, -13256557, -6167798, -731016, 15289673, 25947805, 15286587, 30997318, -6703063},
1016 FieldElement{7392032, 16618386, 23946583, -8039892, -13265164, -1533858, -14197445, -2321576, 17649998, -250080},
1017 },
1018 {
1019 FieldElement{-9301088, -14193827, 30609526, -3049543, -25175069, -1283752, -15241566, -9525724, -2233253, 7662146},
1020 FieldElement{-17558673, 1763594, -33114336, 15908610, -30040870, -12174295, 7335080, -8472199, -3174674, 3440183},
1021 FieldElement{-19889700, -5977008, -24111293, -9688870, 10799743, -16571957, 40450, -4431835, 4862400, 1133},
1022 },
1023 {
1024 FieldElement{-32856209, -7873957, -5422389, 14860950, -16319031, 7956142, 7258061, 311861, -30594991, -7379421},
1025 FieldElement{-3773428, -1565936, 28985340, 7499440, 24445838, 9325937, 29727763, 16527196, 18278453, 15405622},
1026 FieldElement{-4381906, 8508652, -19898366, -3674424, -5984453, 15149970, -13313598, 843523, -21875062, 13626197},
1027 },
1028 {
1029 FieldElement{2281448, -13487055, -10915418, -2609910, 1879358, 16164207, -10783882, 3953792, 13340839, 15928663},
1030 FieldElement{31727126, -7179855, -18437503, -8283652, 2875793, -16390330, -25269894, -7014826, -23452306, 5964753},
1031 FieldElement{4100420, -5959452, -17179337, 6017714, -18705837, 12227141, -26684835, 11344144, 2538215, -7570755},
1032 },
1033 {
1034 FieldElement{-9433605, 6123113, 11159803, -2156608, 30016280, 14966241, -20474983, 1485421, -629256, -15958862},
1035 FieldElement{-26804558, 4260919, 11851389, 9658551, -32017107, 16367492, -20205425, -13191288, 11659922, -11115118},
1036 FieldElement{26180396, 10015009, -30844224, -8581293, 5418197, 9480663, 2231568, -10170080, 33100372, -1306171},
1037 },
1038 {
1039 FieldElement{15121113, -5201871, -10389905, 15427821, -27509937, -15992507, 21670947, 4486675, -5931810, -14466380},
1040 FieldElement{16166486, -9483733, -11104130, 6023908, -31926798, -1364923, 2340060, -16254968, -10735770, -10039824},
1041 FieldElement{28042865, -3557089, -12126526, 12259706, -3717498, -6945899, 6766453, -8689599, 18036436, 5803270},
1042 },
1043 },
1044 {
1045 {
1046 FieldElement{-817581, 6763912, 11803561, 1585585, 10958447, -2671165, 23855391, 4598332, -6159431, -14117438},
1047 FieldElement{-31031306, -14256194, 17332029, -2383520, 31312682, -5967183, 696309, 50292, -20095739, 11763584},
1048 FieldElement{-594563, -2514283, -32234153, 12643980, 12650761, 14811489, 665117, -12613632, -19773211, -10713562},
1049 },
1050 {
1051 FieldElement{30464590, -11262872, -4127476, -12734478, 19835327, -7105613, -24396175, 2075773, -17020157, 992471},
1052 FieldElement{18357185, -6994433, 7766382, 16342475, -29324918, 411174, 14578841, 8080033, -11574335, -10601610},
1053 FieldElement{19598397, 10334610, 12555054, 2555664, 18821899, -10339780, 21873263, 16014234, 26224780, 16452269},
1054 },
1055 {
1056 FieldElement{-30223925, 5145196, 5944548, 16385966, 3976735, 2009897, -11377804, -7618186, -20533829, 3698650},
1057 FieldElement{14187449, 3448569, -10636236, -10810935, -22663880, -3433596, 7268410, -10890444, 27394301, 12015369},
1058 FieldElement{19695761, 16087646, 28032085, 12999827, 6817792, 11427614, 20244189, -1312777, -13259127, -3402461},
1059 },
1060 {
1061 FieldElement{30860103, 12735208, -1888245, -4699734, -16974906, 2256940, -8166013, 12298312, -8550524, -10393462},
1062 FieldElement{-5719826, -11245325, -1910649, 15569035, 26642876, -7587760, -5789354, -15118654, -4976164, 12651793},
1063 FieldElement{-2848395, 9953421, 11531313, -5282879, 26895123, -12697089, -13118820, -16517902, 9768698, -2533218},
1064 },
1065 {
1066 FieldElement{-24719459, 1894651, -287698, -4704085, 15348719, -8156530, 32767513, 12765450, 4940095, 10678226},
1067 FieldElement{18860224, 15980149, -18987240, -1562570, -26233012, -11071856, -7843882, 13944024, -24372348, 16582019},
1068 FieldElement{-15504260, 4970268, -29893044, 4175593, -20993212, -2199756, -11704054, 15444560, -11003761, 7989037},
1069 },
1070 {
1071 FieldElement{31490452, 5568061, -2412803, 2182383, -32336847, 4531686, -32078269, 6200206, -19686113, -14800171},
1072 FieldElement{-17308668, -15879940, -31522777, -2831, -32887382, 16375549, 8680158, -16371713, 28550068, -6857132},
1073 FieldElement{-28126887, -5688091, 16837845, -1820458, -6850681, 12700016, -30039981, 4364038, 1155602, 5988841},
1074 },
1075 {
1076 FieldElement{21890435, -13272907, -12624011, 12154349, -7831873, 15300496, 23148983, -4470481, 24618407, 8283181},
1077 FieldElement{-33136107, -10512751, 9975416, 6841041, -31559793, 16356536, 3070187, -7025928, 1466169, 10740210},
1078 FieldElement{-1509399, -15488185, -13503385, -10655916, 32799044, 909394, -13938903, -5779719, -32164649, -15327040},
1079 },
1080 {
1081 FieldElement{3960823, -14267803, -28026090, -15918051, -19404858, 13146868, 15567327, 951507, -3260321, -573935},
1082 FieldElement{24740841, 5052253, -30094131, 8961361, 25877428, 6165135, -24368180, 14397372, -7380369, -6144105},
1083 FieldElement{-28888365, 3510803, -28103278, -1158478, -11238128, -10631454, -15441463, -14453128, -1625486, -6494814},
1084 },
1085 },
1086 {
1087 {
1088 FieldElement{793299, -9230478, 8836302, -6235707, -27360908, -2369593, 33152843, -4885251, -9906200, -621852},
1089 FieldElement{5666233, 525582, 20782575, -8038419, -24538499, 14657740, 16099374, 1468826, -6171428, -15186581},
1090 FieldElement{-4859255, -3779343, -2917758, -6748019, 7778750, 11688288, -30404353, -9871238, -1558923, -9863646},
1091 },
1092 {
1093 FieldElement{10896332, -7719704, 824275, 472601, -19460308, 3009587, 25248958, 14783338, -30581476, -15757844},
1094 FieldElement{10566929, 12612572, -31944212, 11118703, -12633376, 12362879, 21752402, 8822496, 24003793, 14264025},
1095 FieldElement{27713862, -7355973, -11008240, 9227530, 27050101, 2504721, 23886875, -13117525, 13958495, -5732453},
1096 },
1097 {
1098 FieldElement{-23481610, 4867226, -27247128, 3900521, 29838369, -8212291, -31889399, -10041781, 7340521, -15410068},
1099 FieldElement{4646514, -8011124, -22766023, -11532654, 23184553, 8566613, 31366726, -1381061, -15066784, -10375192},
1100 FieldElement{-17270517, 12723032, -16993061, 14878794, 21619651, -6197576, 27584817, 3093888, -8843694, 3849921},
1101 },
1102 {
1103 FieldElement{-9064912, 2103172, 25561640, -15125738, -5239824, 9582958, 32477045, -9017955, 5002294, -15550259},
1104 FieldElement{-12057553, -11177906, 21115585, -13365155, 8808712, -12030708, 16489530, 13378448, -25845716, 12741426},
1105 FieldElement{-5946367, 10645103, -30911586, 15390284, -3286982, -7118677, 24306472, 15852464, 28834118, -7646072},
1106 },
1107 {
1108 FieldElement{-17335748, -9107057, -24531279, 9434953, -8472084, -583362, -13090771, 455841, 20461858, 5491305},
1109 FieldElement{13669248, -16095482, -12481974, -10203039, -14569770, -11893198, -24995986, 11293807, -28588204, -9421832},
1110 FieldElement{28497928, 6272777, -33022994, 14470570, 8906179, -1225630, 18504674, -14165166, 29867745, -8795943},
1111 },
1112 {
1113 FieldElement{-16207023, 13517196, -27799630, -13697798, 24009064, -6373891, -6367600, -13175392, 22853429, -4012011},
1114 FieldElement{24191378, 16712145, -13931797, 15217831, 14542237, 1646131, 18603514, -11037887, 12876623, -2112447},
1115 FieldElement{17902668, 4518229, -411702, -2829247, 26878217, 5258055, -12860753, 608397, 16031844, 3723494},
1116 },
1117 {
1118 FieldElement{-28632773, 12763728, -20446446, 7577504, 33001348, -13017745, 17558842, -7872890, 23896954, -4314245},
1119 FieldElement{-20005381, -12011952, 31520464, 605201, 2543521, 5991821, -2945064, 7229064, -9919646, -8826859},
1120 FieldElement{28816045, 298879, -28165016, -15920938, 19000928, -1665890, -12680833, -2949325, -18051778, -2082915},
1121 },
1122 {
1123 FieldElement{16000882, -344896, 3493092, -11447198, -29504595, -13159789, 12577740, 16041268, -19715240, 7847707},
1124 FieldElement{10151868, 10572098, 27312476, 7922682, 14825339, 4723128, -32855931, -6519018, -10020567, 3852848},
1125 FieldElement{-11430470, 15697596, -21121557, -4420647, 5386314, 15063598, 16514493, -15932110, 29330899, -15076224},
1126 },
1127 },
1128 {
1129 {
1130 FieldElement{-25499735, -4378794, -15222908, -6901211, 16615731, 2051784, 3303702, 15490, -27548796, 12314391},
1131 FieldElement{15683520, -6003043, 18109120, -9980648, 15337968, -5997823, -16717435, 15921866, 16103996, -3731215},
1132 FieldElement{-23169824, -10781249, 13588192, -1628807, -3798557, -1074929, -19273607, 5402699, -29815713, -9841101},
1133 },
1134 {
1135 FieldElement{23190676, 2384583, -32714340, 3462154, -29903655, -1529132, -11266856, 8911517, -25205859, 2739713},
1136 FieldElement{21374101, -3554250, -33524649, 9874411, 15377179, 11831242, -33529904, 6134907, 4931255, 11987849},
1137 FieldElement{-7732, -2978858, -16223486, 7277597, 105524, -322051, -31480539, 13861388, -30076310, 10117930},
1138 },
1139 {
1140 FieldElement{-29501170, -10744872, -26163768, 13051539, -25625564, 5089643, -6325503, 6704079, 12890019, 15728940},
1141 FieldElement{-21972360, -11771379, -951059, -4418840, 14704840, 2695116, 903376, -10428139, 12885167, 8311031},
1142 FieldElement{-17516482, 5352194, 10384213, -13811658, 7506451, 13453191, 26423267, 4384730, 1888765, -5435404},
1143 },
1144 {
1145 FieldElement{-25817338, -3107312, -13494599, -3182506, 30896459, -13921729, -32251644, -12707869, -19464434, -3340243},
1146 FieldElement{-23607977, -2665774, -526091, 4651136, 5765089, 4618330, 6092245, 14845197, 17151279, -9854116},
1147 FieldElement{-24830458, -12733720, -15165978, 10367250, -29530908, -265356, 22825805, -7087279, -16866484, 16176525},
1148 },
1149 {
1150 FieldElement{-23583256, 6564961, 20063689, 3798228, -4740178, 7359225, 2006182, -10363426, -28746253, -10197509},
1151 FieldElement{-10626600, -4486402, -13320562, -5125317, 3432136, -6393229, 23632037, -1940610, 32808310, 1099883},
1152 FieldElement{15030977, 5768825, -27451236, -2887299, -6427378, -15361371, -15277896, -6809350, 2051441, -15225865},
1153 },
1154 {
1155 FieldElement{-3362323, -7239372, 7517890, 9824992, 23555850, 295369, 5148398, -14154188, -22686354, 16633660},
1156 FieldElement{4577086, -16752288, 13249841, -15304328, 19958763, -14537274, 18559670, -10759549, 8402478, -9864273},
1157 FieldElement{-28406330, -1051581, -26790155, -907698, -17212414, -11030789, 9453451, -14980072, 17983010, 9967138},
1158 },
1159 {
1160 FieldElement{-25762494, 6524722, 26585488, 9969270, 24709298, 1220360, -1677990, 7806337, 17507396, 3651560},
1161 FieldElement{-10420457, -4118111, 14584639, 15971087, -15768321, 8861010, 26556809, -5574557, -18553322, -11357135},
1162 FieldElement{2839101, 14284142, 4029895, 3472686, 14402957, 12689363, -26642121, 8459447, -5605463, -7621941},
1163 },
1164 {
1165 FieldElement{-4839289, -3535444, 9744961, 2871048, 25113978, 3187018, -25110813, -849066, 17258084, -7977739},
1166 FieldElement{18164541, -10595176, -17154882, -1542417, 19237078, -9745295, 23357533, -15217008, 26908270, 12150756},
1167 FieldElement{-30264870, -7647865, 5112249, -7036672, -1499807, -6974257, 43168, -5537701, -32302074, 16215819},
1168 },
1169 },
1170 {
1171 {
1172 FieldElement{-6898905, 9824394, -12304779, -4401089, -31397141, -6276835, 32574489, 12532905, -7503072, -8675347},
1173 FieldElement{-27343522, -16515468, -27151524, -10722951, 946346, 16291093, 254968, 7168080, 21676107, -1943028},
1174 FieldElement{21260961, -8424752, -16831886, -11920822, -23677961, 3968121, -3651949, -6215466, -3556191, -7913075},
1175 },
1176 {
1177 FieldElement{16544754, 13250366, -16804428, 15546242, -4583003, 12757258, -2462308, -8680336, -18907032, -9662799},
1178 FieldElement{-2415239, -15577728, 18312303, 4964443, -15272530, -12653564, 26820651, 16690659, 25459437, -4564609},
1179 FieldElement{-25144690, 11425020, 28423002, -11020557, -6144921, -15826224, 9142795, -2391602, -6432418, -1644817},
1180 },
1181 {
1182 FieldElement{-23104652, 6253476, 16964147, -3768872, -25113972, -12296437, -27457225, -16344658, 6335692, 7249989},
1183 FieldElement{-30333227, 13979675, 7503222, -12368314, -11956721, -4621693, -30272269, 2682242, 25993170, -12478523},
1184 FieldElement{4364628, 5930691, 32304656, -10044554, -8054781, 15091131, 22857016, -10598955, 31820368, 15075278},
1185 },
1186 {
1187 FieldElement{31879134, -8918693, 17258761, 90626, -8041836, -4917709, 24162788, -9650886, -17970238, 12833045},
1188 FieldElement{19073683, 14851414, -24403169, -11860168, 7625278, 11091125, -19619190, 2074449, -9413939, 14905377},
1189 FieldElement{24483667, -11935567, -2518866, -11547418, -1553130, 15355506, -25282080, 9253129, 27628530, -7555480},
1190 },
1191 {
1192 FieldElement{17597607, 8340603, 19355617, 552187, 26198470, -3176583, 4593324, -9157582, -14110875, 15297016},
1193 FieldElement{510886, 14337390, -31785257, 16638632, 6328095, 2713355, -20217417, -11864220, 8683221, 2921426},
1194 FieldElement{18606791, 11874196, 27155355, -5281482, -24031742, 6265446, -25178240, -1278924, 4674690, 13890525},
1195 },
1196 {
1197 FieldElement{13609624, 13069022, -27372361, -13055908, 24360586, 9592974, 14977157, 9835105, 4389687, 288396},
1198 FieldElement{9922506, -519394, 13613107, 5883594, -18758345, -434263, -12304062, 8317628, 23388070, 16052080},
1199 FieldElement{12720016, 11937594, -31970060, -5028689, 26900120, 8561328, -20155687, -11632979, -14754271, -10812892},
1200 },
1201 {
1202 FieldElement{15961858, 14150409, 26716931, -665832, -22794328, 13603569, 11829573, 7467844, -28822128, 929275},
1203 FieldElement{11038231, -11582396, -27310482, -7316562, -10498527, -16307831, -23479533, -9371869, -21393143, 2465074},
1204 FieldElement{20017163, -4323226, 27915242, 1529148, 12396362, 15675764, 13817261, -9658066, 2463391, -4622140},
1205 },
1206 {
1207 FieldElement{-16358878, -12663911, -12065183, 4996454, -1256422, 1073572, 9583558, 12851107, 4003896, 12673717},
1208 FieldElement{-1731589, -15155870, -3262930, 16143082, 19294135, 13385325, 14741514, -9103726, 7903886, 2348101},
1209 FieldElement{24536016, -16515207, 12715592, -3862155, 1511293, 10047386, -3842346, -7129159, -28377538, 10048127},
1210 },
1211 },
1212 {
1213 {
1214 FieldElement{-12622226, -6204820, 30718825, 2591312, -10617028, 12192840, 18873298, -7297090, -32297756, 15221632},
1215 FieldElement{-26478122, -11103864, 11546244, -1852483, 9180880, 7656409, -21343950, 2095755, 29769758, 6593415},
1216 FieldElement{-31994208, -2907461, 4176912, 3264766, 12538965, -868111, 26312345, -6118678, 30958054, 8292160},
1217 },
1218 {
1219 FieldElement{31429822, -13959116, 29173532, 15632448, 12174511, -2760094, 32808831, 3977186, 26143136, -3148876},
1220 FieldElement{22648901, 1402143, -22799984, 13746059, 7936347, 365344, -8668633, -1674433, -3758243, -2304625},
1221 FieldElement{-15491917, 8012313, -2514730, -12702462, -23965846, -10254029, -1612713, -1535569, -16664475, 8194478},
1222 },
1223 {
1224 FieldElement{27338066, -7507420, -7414224, 10140405, -19026427, -6589889, 27277191, 8855376, 28572286, 3005164},
1225 FieldElement{26287124, 4821776, 25476601, -4145903, -3764513, -15788984, -18008582, 1182479, -26094821, -13079595},
1226 FieldElement{-7171154, 3178080, 23970071, 6201893, -17195577, -4489192, -21876275, -13982627, 32208683, -1198248},
1227 },
1228 {
1229 FieldElement{-16657702, 2817643, -10286362, 14811298, 6024667, 13349505, -27315504, -10497842, -27672585, -11539858},
1230 FieldElement{15941029, -9405932, -21367050, 8062055, 31876073, -238629, -15278393, -1444429, 15397331, -4130193},
1231 FieldElement{8934485, -13485467, -23286397, -13423241, -32446090, 14047986, 31170398, -1441021, -27505566, 15087184},
1232 },
1233 {
1234 FieldElement{-18357243, -2156491, 24524913, -16677868, 15520427, -6360776, -15502406, 11461896, 16788528, -5868942},
1235 FieldElement{-1947386, 16013773, 21750665, 3714552, -17401782, -16055433, -3770287, -10323320, 31322514, -11615635},
1236 FieldElement{21426655, -5650218, -13648287, -5347537, -28812189, -4920970, -18275391, -14621414, 13040862, -12112948},
1237 },
1238 {
1239 FieldElement{11293895, 12478086, -27136401, 15083750, -29307421, 14748872, 14555558, -13417103, 1613711, 4896935},
1240 FieldElement{-25894883, 15323294, -8489791, -8057900, 25967126, -13425460, 2825960, -4897045, -23971776, -11267415},
1241 FieldElement{-15924766, -5229880, -17443532, 6410664, 3622847, 10243618, 20615400, 12405433, -23753030, -8436416},
1242 },
1243 {
1244 FieldElement{-7091295, 12556208, -20191352, 9025187, -17072479, 4333801, 4378436, 2432030, 23097949, -566018},
1245 FieldElement{4565804, -16025654, 20084412, -7842817, 1724999, 189254, 24767264, 10103221, -18512313, 2424778},
1246 FieldElement{366633, -11976806, 8173090, -6890119, 30788634, 5745705, -7168678, 1344109, -3642553, 12412659},
1247 },
1248 {
1249 FieldElement{-24001791, 7690286, 14929416, -168257, -32210835, -13412986, 24162697, -15326504, -3141501, 11179385},
1250 FieldElement{18289522, -14724954, 8056945, 16430056, -21729724, 7842514, -6001441, -1486897, -18684645, -11443503},
1251 FieldElement{476239, 6601091, -6152790, -9723375, 17503545, -4863900, 27672959, 13403813, 11052904, 5219329},
1252 },
1253 },
1254 {
1255 {
1256 FieldElement{20678546, -8375738, -32671898, 8849123, -5009758, 14574752, 31186971, -3973730, 9014762, -8579056},
1257 FieldElement{-13644050, -10350239, -15962508, 5075808, -1514661, -11534600, -33102500, 9160280, 8473550, -3256838},
1258 FieldElement{24900749, 14435722, 17209120, -15292541, -22592275, 9878983, -7689309, -16335821, -24568481, 11788948},
1259 },
1260 {
1261 FieldElement{-3118155, -11395194, -13802089, 14797441, 9652448, -6845904, -20037437, 10410733, -24568470, -1458691},
1262 FieldElement{-15659161, 16736706, -22467150, 10215878, -9097177, 7563911, 11871841, -12505194, -18513325, 8464118},
1263 FieldElement{-23400612, 8348507, -14585951, -861714, -3950205, -6373419, 14325289, 8628612, 33313881, -8370517},
1264 },
1265 {
1266 FieldElement{-20186973, -4967935, 22367356, 5271547, -1097117, -4788838, -24805667, -10236854, -8940735, -5818269},
1267 FieldElement{-6948785, -1795212, -32625683, -16021179, 32635414, -7374245, 15989197, -12838188, 28358192, -4253904},
1268 FieldElement{-23561781, -2799059, -32351682, -1661963, -9147719, 10429267, -16637684, 4072016, -5351664, 5596589},
1269 },
1270 {
1271 FieldElement{-28236598, -3390048, 12312896, 6213178, 3117142, 16078565, 29266239, 2557221, 1768301, 15373193},
1272 FieldElement{-7243358, -3246960, -4593467, -7553353, -127927, -912245, -1090902, -4504991, -24660491, 3442910},
1273 FieldElement{-30210571, 5124043, 14181784, 8197961, 18964734, -11939093, 22597931, 7176455, -18585478, 13365930},
1274 },
1275 {
1276 FieldElement{-7877390, -1499958, 8324673, 4690079, 6261860, 890446, 24538107, -8570186, -9689599, -3031667},
1277 FieldElement{25008904, -10771599, -4305031, -9638010, 16265036, 15721635, 683793, -11823784, 15723479, -15163481},
1278 FieldElement{-9660625, 12374379, -27006999, -7026148, -7724114, -12314514, 11879682, 5400171, 519526, -1235876},
1279 },
1280 {
1281 FieldElement{22258397, -16332233, -7869817, 14613016, -22520255, -2950923, -20353881, 7315967, 16648397, 7605640},
1282 FieldElement{-8081308, -8464597, -8223311, 9719710, 19259459, -15348212, 23994942, -5281555, -9468848, 4763278},
1283 FieldElement{-21699244, 9220969, -15730624, 1084137, -25476107, -2852390, 31088447, -7764523, -11356529, 728112},
1284 },
1285 {
1286 FieldElement{26047220, -11751471, -6900323, -16521798, 24092068, 9158119, -4273545, -12555558, -29365436, -5498272},
1287 FieldElement{17510331, -322857, 5854289, 8403524, 17133918, -3112612, -28111007, 12327945, 10750447, 10014012},
1288 FieldElement{-10312768, 3936952, 9156313, -8897683, 16498692, -994647, -27481051, -666732, 3424691, 7540221},
1289 },
1290 {
1291 FieldElement{30322361, -6964110, 11361005, -4143317, 7433304, 4989748, -7071422, -16317219, -9244265, 15258046},
1292 FieldElement{13054562, -2779497, 19155474, 469045, -12482797, 4566042, 5631406, 2711395, 1062915, -5136345},
1293 FieldElement{-19240248, -11254599, -29509029, -7499965, -5835763, 13005411, -6066489, 12194497, 32960380, 1459310},
1294 },
1295 },
1296 {
1297 {
1298 FieldElement{19852034, 7027924, 23669353, 10020366, 8586503, -6657907, 394197, -6101885, 18638003, -11174937},
1299 FieldElement{31395534, 15098109, 26581030, 8030562, -16527914, -5007134, 9012486, -7584354, -6643087, -5442636},
1300 FieldElement{-9192165, -2347377, -1997099, 4529534, 25766844, 607986, -13222, 9677543, -32294889, -6456008},
1301 },
1302 {
1303 FieldElement{-2444496, -149937, 29348902, 8186665, 1873760, 12489863, -30934579, -7839692, -7852844, -8138429},
1304 FieldElement{-15236356, -15433509, 7766470, 746860, 26346930, -10221762, -27333451, 10754588, -9431476, 5203576},
1305 FieldElement{31834314, 14135496, -770007, 5159118, 20917671, -16768096, -7467973, -7337524, 31809243, 7347066},
1306 },
1307 {
1308 FieldElement{-9606723, -11874240, 20414459, 13033986, 13716524, -11691881, 19797970, -12211255, 15192876, -2087490},
1309 FieldElement{-12663563, -2181719, 1168162, -3804809, 26747877, -14138091, 10609330, 12694420, 33473243, -13382104},
1310 FieldElement{33184999, 11180355, 15832085, -11385430, -1633671, 225884, 15089336, -11023903, -6135662, 14480053},
1311 },
1312 {
1313 FieldElement{31308717, -5619998, 31030840, -1897099, 15674547, -6582883, 5496208, 13685227, 27595050, 8737275},
1314 FieldElement{-20318852, -15150239, 10933843, -16178022, 8335352, -7546022, -31008351, -12610604, 26498114, 66511},
1315 FieldElement{22644454, -8761729, -16671776, 4884562, -3105614, -13559366, 30540766, -4286747, -13327787, -7515095},
1316 },
1317 {
1318 FieldElement{-28017847, 9834845, 18617207, -2681312, -3401956, -13307506, 8205540, 13585437, -17127465, 15115439},
1319 FieldElement{23711543, -672915, 31206561, -8362711, 6164647, -9709987, -33535882, -1426096, 8236921, 16492939},
1320 FieldElement{-23910559, -13515526, -26299483, -4503841, 25005590, -7687270, 19574902, 10071562, 6708380, -6222424},
1321 },
1322 {
1323 FieldElement{2101391, -4930054, 19702731, 2367575, -15427167, 1047675, 5301017, 9328700, 29955601, -11678310},
1324 FieldElement{3096359, 9271816, -21620864, -15521844, -14847996, -7592937, -25892142, -12635595, -9917575, 6216608},
1325 FieldElement{-32615849, 338663, -25195611, 2510422, -29213566, -13820213, 24822830, -6146567, -26767480, 7525079},
1326 },
1327 {
1328 FieldElement{-23066649, -13985623, 16133487, -7896178, -3389565, 778788, -910336, -2782495, -19386633, 11994101},
1329 FieldElement{21691500, -13624626, -641331, -14367021, 3285881, -3483596, -25064666, 9718258, -7477437, 13381418},
1330 FieldElement{18445390, -4202236, 14979846, 11622458, -1727110, -3582980, 23111648, -6375247, 28535282, 15779576},
1331 },
1332 {
1333 FieldElement{30098053, 3089662, -9234387, 16662135, -21306940, 11308411, -14068454, 12021730, 9955285, -16303356},
1334 FieldElement{9734894, -14576830, -7473633, -9138735, 2060392, 11313496, -18426029, 9924399, 20194861, 13380996},
1335 FieldElement{-26378102, -7965207, -22167821, 15789297, -18055342, -6168792, -1984914, 15707771, 26342023, 10146099},
1336 },
1337 },
1338 {
1339 {
1340 FieldElement{-26016874, -219943, 21339191, -41388, 19745256, -2878700, -29637280, 2227040, 21612326, -545728},
1341 FieldElement{-13077387, 1184228, 23562814, -5970442, -20351244, -6348714, 25764461, 12243797, -20856566, 11649658},
1342 FieldElement{-10031494, 11262626, 27384172, 2271902, 26947504, -15997771, 39944, 6114064, 33514190, 2333242},
1343 },
1344 {
1345 FieldElement{-21433588, -12421821, 8119782, 7219913, -21830522, -9016134, -6679750, -12670638, 24350578, -13450001},
1346 FieldElement{-4116307, -11271533, -23886186, 4843615, -30088339, 690623, -31536088, -10406836, 8317860, 12352766},
1347 FieldElement{18200138, -14475911, -33087759, -2696619, -23702521, -9102511, -23552096, -2287550, 20712163, 6719373},
1348 },
1349 {
1350 FieldElement{26656208, 6075253, -7858556, 1886072, -28344043, 4262326, 11117530, -3763210, 26224235, -3297458},
1351 FieldElement{-17168938, -14854097, -3395676, -16369877, -19954045, 14050420, 21728352, 9493610, 18620611, -16428628},
1352 FieldElement{-13323321, 13325349, 11432106, 5964811, 18609221, 6062965, -5269471, -9725556, -30701573, -16479657},
1353 },
1354 {
1355 FieldElement{-23860538, -11233159, 26961357, 1640861, -32413112, -16737940, 12248509, -5240639, 13735342, 1934062},
1356 FieldElement{25089769, 6742589, 17081145, -13406266, 21909293, -16067981, -15136294, -3765346, -21277997, 5473616},
1357 FieldElement{31883677, -7961101, 1083432, -11572403, 22828471, 13290673, -7125085, 12469656, 29111212, -5451014},
1358 },
1359 {
1360 FieldElement{24244947, -15050407, -26262976, 2791540, -14997599, 16666678, 24367466, 6388839, -10295587, 452383},
1361 FieldElement{-25640782, -3417841, 5217916, 16224624, 19987036, -4082269, -24236251, -5915248, 15766062, 8407814},
1362 FieldElement{-20406999, 13990231, 15495425, 16395525, 5377168, 15166495, -8917023, -4388953, -8067909, 2276718},
1363 },
1364 {
1365 FieldElement{30157918, 12924066, -17712050, 9245753, 19895028, 3368142, -23827587, 5096219, 22740376, -7303417},
1366 FieldElement{2041139, -14256350, 7783687, 13876377, -25946985, -13352459, 24051124, 13742383, -15637599, 13295222},
1367 FieldElement{33338237, -8505733, 12532113, 7977527, 9106186, -1715251, -17720195, -4612972, -4451357, -14669444},
1368 },
1369 {
1370 FieldElement{-20045281, 5454097, -14346548, 6447146, 28862071, 1883651, -2469266, -4141880, 7770569, 9620597},
1371 FieldElement{23208068, 7979712, 33071466, 8149229, 1758231, -10834995, 30945528, -1694323, -33502340, -14767970},
1372 FieldElement{1439958, -16270480, -1079989, -793782, 4625402, 10647766, -5043801, 1220118, 30494170, -11440799},
1373 },
1374 {
1375 FieldElement{-5037580, -13028295, -2970559, -3061767, 15640974, -6701666, -26739026, 926050, -1684339, -13333647},
1376 FieldElement{13908495, -3549272, 30919928, -6273825, -21521863, 7989039, 9021034, 9078865, 3353509, 4033511},
1377 FieldElement{-29663431, -15113610, 32259991, -344482, 24295849, -12912123, 23161163, 8839127, 27485041, 7356032},
1378 },
1379 },
1380 {
1381 {
1382 FieldElement{9661027, 705443, 11980065, -5370154, -1628543, 14661173, -6346142, 2625015, 28431036, -16771834},
1383 FieldElement{-23839233, -8311415, -25945511, 7480958, -17681669, -8354183, -22545972, 14150565, 15970762, 4099461},
1384 FieldElement{29262576, 16756590, 26350592, -8793563, 8529671, -11208050, 13617293, -9937143, 11465739, 8317062},
1385 },
1386 {
1387 FieldElement{-25493081, -6962928, 32500200, -9419051, -23038724, -2302222, 14898637, 3848455, 20969334, -5157516},
1388 FieldElement{-20384450, -14347713, -18336405, 13884722, -33039454, 2842114, -21610826, -3649888, 11177095, 14989547},
1389 FieldElement{-24496721, -11716016, 16959896, 2278463, 12066309, 10137771, 13515641, 2581286, -28487508, 9930240},
1390 },
1391 {
1392 FieldElement{-17751622, -2097826, 16544300, -13009300, -15914807, -14949081, 18345767, -13403753, 16291481, -5314038},
1393 FieldElement{-33229194, 2553288, 32678213, 9875984, 8534129, 6889387, -9676774, 6957617, 4368891, 9788741},
1394 FieldElement{16660756, 7281060, -10830758, 12911820, 20108584, -8101676, -21722536, -8613148, 16250552, -11111103},
1395 },
1396 {
1397 FieldElement{-19765507, 2390526, -16551031, 14161980, 1905286, 6414907, 4689584, 10604807, -30190403, 4782747},
1398 FieldElement{-1354539, 14736941, -7367442, -13292886, 7710542, -14155590, -9981571, 4383045, 22546403, 437323},
1399 FieldElement{31665577, -12180464, -16186830, 1491339, -18368625, 3294682, 27343084, 2786261, -30633590, -14097016},
1400 },
1401 {
1402 FieldElement{-14467279, -683715, -33374107, 7448552, 19294360, 14334329, -19690631, 2355319, -19284671, -6114373},
1403 FieldElement{15121312, -15796162, 6377020, -6031361, -10798111, -12957845, 18952177, 15496498, -29380133, 11754228},
1404 FieldElement{-2637277, -13483075, 8488727, -14303896, 12728761, -1622493, 7141596, 11724556, 22761615, -10134141},
1405 },
1406 {
1407 FieldElement{16918416, 11729663, -18083579, 3022987, -31015732, -13339659, -28741185, -12227393, 32851222, 11717399},
1408 FieldElement{11166634, 7338049, -6722523, 4531520, -29468672, -7302055, 31474879, 3483633, -1193175, -4030831},
1409 FieldElement{-185635, 9921305, 31456609, -13536438, -12013818, 13348923, 33142652, 6546660, -19985279, -3948376},
1410 },
1411 {
1412 FieldElement{-32460596, 11266712, -11197107, -7899103, 31703694, 3855903, -8537131, -12833048, -30772034, -15486313},
1413 FieldElement{-18006477, 12709068, 3991746, -6479188, -21491523, -10550425, -31135347, -16049879, 10928917, 3011958},
1414 FieldElement{-6957757, -15594337, 31696059, 334240, 29576716, 14796075, -30831056, -12805180, 18008031, 10258577},
1415 },
1416 {
1417 FieldElement{-22448644, 15655569, 7018479, -4410003, -30314266, -1201591, -1853465, 1367120, 25127874, 6671743},
1418 FieldElement{29701166, -14373934, -10878120, 9279288, -17568, 13127210, 21382910, 11042292, 25838796, 4642684},
1419 FieldElement{-20430234, 14955537, -24126347, 8124619, -5369288, -5990470, 30468147, -13900640, 18423289, 4177476},
1420 },
1421 },
1422}
diff --git a/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/edwards25519.go b/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/edwards25519.go
new file mode 100644
index 0000000..5f8b994
--- /dev/null
+++ b/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/edwards25519.go
@@ -0,0 +1,1771 @@
1// Copyright 2016 The Go Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package edwards25519
6
7// This code is a port of the public domain, “ref10” implementation of ed25519
8// from SUPERCOP.
9
10// FieldElement represents an element of the field GF(2^255 - 19). An element
11// t, entries t[0]...t[9], represents the integer t[0]+2^26 t[1]+2^51 t[2]+2^77
12// t[3]+2^102 t[4]+...+2^230 t[9]. Bounds on each t[i] vary depending on
13// context.
14type FieldElement [10]int32
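// Editor's note: a minimal sketch, not part of the vendored file, of how the
// limb representation above maps back to an integer. Limb i carries weight
// 2^ceil(25.5*i), i.e. exponents 0, 26, 51, 77, 102, 128, 153, 179, 204, 230.
// The helper name feToBig and the use of math/big are illustrative only.
//
//	func feToBig(t *FieldElement) *big.Int {
//		exps := []uint{0, 26, 51, 77, 102, 128, 153, 179, 204, 230}
//		p := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 255), big.NewInt(19))
//		acc := new(big.Int)
//		for i, e := range exps {
//			acc.Add(acc, new(big.Int).Lsh(big.NewInt(int64(t[i])), e))
//		}
//		return acc.Mod(acc, p) // reduce into [0, 2^255-19)
//	}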
15
16var zero FieldElement
17
18func FeZero(fe *FieldElement) {
19 copy(fe[:], zero[:])
20}
21
22func FeOne(fe *FieldElement) {
23 FeZero(fe)
24 fe[0] = 1
25}
26
27func FeAdd(dst, a, b *FieldElement) {
28 dst[0] = a[0] + b[0]
29 dst[1] = a[1] + b[1]
30 dst[2] = a[2] + b[2]
31 dst[3] = a[3] + b[3]
32 dst[4] = a[4] + b[4]
33 dst[5] = a[5] + b[5]
34 dst[6] = a[6] + b[6]
35 dst[7] = a[7] + b[7]
36 dst[8] = a[8] + b[8]
37 dst[9] = a[9] + b[9]
38}
39
40func FeSub(dst, a, b *FieldElement) {
41 dst[0] = a[0] - b[0]
42 dst[1] = a[1] - b[1]
43 dst[2] = a[2] - b[2]
44 dst[3] = a[3] - b[3]
45 dst[4] = a[4] - b[4]
46 dst[5] = a[5] - b[5]
47 dst[6] = a[6] - b[6]
48 dst[7] = a[7] - b[7]
49 dst[8] = a[8] - b[8]
50 dst[9] = a[9] - b[9]
51}
52
53func FeCopy(dst, src *FieldElement) {
54 copy(dst[:], src[:])
55}
56
57// FeCMove sets f = g if b == 1 and leaves f unchanged if b == 0,
58// using only bitwise operations so that it runs in constant time.
59//
60// Preconditions: b in {0,1}.
61func FeCMove(f, g *FieldElement, b int32) {
62 b = -b
63 f[0] ^= b & (f[0] ^ g[0])
64 f[1] ^= b & (f[1] ^ g[1])
65 f[2] ^= b & (f[2] ^ g[2])
66 f[3] ^= b & (f[3] ^ g[3])
67 f[4] ^= b & (f[4] ^ g[4])
68 f[5] ^= b & (f[5] ^ g[5])
69 f[6] ^= b & (f[6] ^ g[6])
70 f[7] ^= b & (f[7] ^ g[7])
71 f[8] ^= b & (f[8] ^ g[8])
72 f[9] ^= b & (f[9] ^ g[9])
73}
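// Editor's note: an illustrative sketch, not part of the vendored file, of the
// branch-free selection FeCMove applies to each limb. Because b is 0 or 1,
// -b is either all-zero or all-one bits, so no data-dependent branch is used:
//
//	func cselect(x, y, b int32) int32 {
//		mask := -b                  // 0 if b == 0, all ones if b == 1
//		return x ^ (mask & (x ^ y)) // x if b == 0, y if b == 1
//	}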
74
75func load3(in []byte) int64 {
76 var r int64
77 r = int64(in[0])
78 r |= int64(in[1]) << 8
79 r |= int64(in[2]) << 16
80 return r
81}
82
83func load4(in []byte) int64 {
84 var r int64
85 r = int64(in[0])
86 r |= int64(in[1]) << 8
87 r |= int64(in[2]) << 16
88 r |= int64(in[3]) << 24
89 return r
90}
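// Editor's note, not part of the vendored file: load3 and load4 read three or
// four bytes as a little-endian integer; load4 is equivalent to
// int64(binary.LittleEndian.Uint32(in)) with "encoding/binary" imported.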
91
92func FeFromBytes(dst *FieldElement, src *[32]byte) {
93 h0 := load4(src[:])
94 h1 := load3(src[4:]) << 6
95 h2 := load3(src[7:]) << 5
96 h3 := load3(src[10:]) << 3
97 h4 := load3(src[13:]) << 2
98 h5 := load4(src[16:])
99 h6 := load3(src[20:]) << 7
100 h7 := load3(src[23:]) << 5
101 h8 := load3(src[26:]) << 4
102 h9 := (load3(src[29:]) & 8388607) << 2
103
104 FeCombine(dst, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9)
105}
106
107// FeToBytes marshals h to s.
108// Preconditions:
109// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
110//
111// Write p=2^255-19; q=floor(h/p).
112// Basic claim: q = floor(2^(-255)(h + 19 2^(-25)h9 + 2^(-1))).
113//
114// Proof:
115// Have |h|<=p so |q|<=1 so |19^2 2^(-255) q|<1/4.
116// Also have |h-2^230 h9|<2^230 so |19 2^(-255)(h-2^230 h9)|<1/4.
117//
118// Write y=2^(-1)-19^2 2^(-255)q-19 2^(-255)(h-2^230 h9).
119// Then 0<y<1.
120//
121// Write r=h-pq.
122// Have 0<=r<=p-1=2^255-20.
123// Thus 0<=r+19(2^-255)r<r+19(2^-255)2^255<=2^255-1.
124//
125// Write x=r+19(2^-255)r+y.
126// Then 0<x<2^255 so floor(2^(-255)x) = 0 so floor(q+2^(-255)x) = q.
127//
128// Have q+2^(-255)x = 2^(-255)(h + 19 2^(-25) h9 + 2^(-1))
129// so floor(2^(-255)(h + 19 2^(-25) h9 + 2^(-1))) = q.
130func FeToBytes(s *[32]byte, h *FieldElement) {
131 var carry [10]int32
132
133 q := (19*h[9] + (1 << 24)) >> 25
134 q = (h[0] + q) >> 26
135 q = (h[1] + q) >> 25
136 q = (h[2] + q) >> 26
137 q = (h[3] + q) >> 25
138 q = (h[4] + q) >> 26
139 q = (h[5] + q) >> 25
140 q = (h[6] + q) >> 26
141 q = (h[7] + q) >> 25
142 q = (h[8] + q) >> 26
143 q = (h[9] + q) >> 25
144
145 // Goal: Output h-(2^255-19)q, which is between 0 and 2^255-20.
146 h[0] += 19 * q
147 // Goal: Output h-2^255 q, which is between 0 and 2^255-20.
148
149 carry[0] = h[0] >> 26
150 h[1] += carry[0]
151 h[0] -= carry[0] << 26
152 carry[1] = h[1] >> 25
153 h[2] += carry[1]
154 h[1] -= carry[1] << 25
155 carry[2] = h[2] >> 26
156 h[3] += carry[2]
157 h[2] -= carry[2] << 26
158 carry[3] = h[3] >> 25
159 h[4] += carry[3]
160 h[3] -= carry[3] << 25
161 carry[4] = h[4] >> 26
162 h[5] += carry[4]
163 h[4] -= carry[4] << 26
164 carry[5] = h[5] >> 25
165 h[6] += carry[5]
166 h[5] -= carry[5] << 25
167 carry[6] = h[6] >> 26
168 h[7] += carry[6]
169 h[6] -= carry[6] << 26
170 carry[7] = h[7] >> 25
171 h[8] += carry[7]
172 h[7] -= carry[7] << 25
173 carry[8] = h[8] >> 26
174 h[9] += carry[8]
175 h[8] -= carry[8] << 26
176 carry[9] = h[9] >> 25
177 h[9] -= carry[9] << 25
178 // h10 = carry9
179
180 // Goal: Output h[0]+...+2^255 h10-2^255 q, which is between 0 and 2^255-20.
181 // Have h[0]+...+2^230 h[9] between 0 and 2^255-1;
182 // evidently 2^255 h10-2^255 q = 0.
183 // Goal: Output h[0]+...+2^230 h[9].
184
185 s[0] = byte(h[0] >> 0)
186 s[1] = byte(h[0] >> 8)
187 s[2] = byte(h[0] >> 16)
188 s[3] = byte((h[0] >> 24) | (h[1] << 2))
189 s[4] = byte(h[1] >> 6)
190 s[5] = byte(h[1] >> 14)
191 s[6] = byte((h[1] >> 22) | (h[2] << 3))
192 s[7] = byte(h[2] >> 5)
193 s[8] = byte(h[2] >> 13)
194 s[9] = byte((h[2] >> 21) | (h[3] << 5))
195 s[10] = byte(h[3] >> 3)
196 s[11] = byte(h[3] >> 11)
197 s[12] = byte((h[3] >> 19) | (h[4] << 6))
198 s[13] = byte(h[4] >> 2)
199 s[14] = byte(h[4] >> 10)
200 s[15] = byte(h[4] >> 18)
201 s[16] = byte(h[5] >> 0)
202 s[17] = byte(h[5] >> 8)
203 s[18] = byte(h[5] >> 16)
204 s[19] = byte((h[5] >> 24) | (h[6] << 1))
205 s[20] = byte(h[6] >> 7)
206 s[21] = byte(h[6] >> 15)
207 s[22] = byte((h[6] >> 23) | (h[7] << 3))
208 s[23] = byte(h[7] >> 5)
209 s[24] = byte(h[7] >> 13)
210 s[25] = byte((h[7] >> 21) | (h[8] << 4))
211 s[26] = byte(h[8] >> 4)
212 s[27] = byte(h[8] >> 12)
213 s[28] = byte((h[8] >> 20) | (h[9] << 6))
214 s[29] = byte(h[9] >> 2)
215 s[30] = byte(h[9] >> 10)
216 s[31] = byte(h[9] >> 18)
217}
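// Editor's note: a round-trip usage sketch, not part of the vendored file.
// FeFromBytes masks off the top bit of src[31], and FeToBytes emits the fully
// reduced, canonical encoding with that bit clear (note that it also reduces
// h in place as a side effect):
//
//	var fe FieldElement
//	var in, out [32]byte
//	in[0] = 9 // any canonical encoding, i.e. an integer below 2^255 - 19
//	FeFromBytes(&fe, &in)
//	FeToBytes(&out, &fe)
//	// out == in holds for canonical inputs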
218
219func FeIsNegative(f *FieldElement) byte {
220 var s [32]byte
221 FeToBytes(&s, f)
222 return s[0] & 1
223}
224
225func FeIsNonZero(f *FieldElement) int32 {
226 var s [32]byte
227 FeToBytes(&s, f)
228 var x uint8
229 for _, b := range s {
230 x |= b
231 }
232 x |= x >> 4
233 x |= x >> 2
234 x |= x >> 1
235 return int32(x & 1)
236}
237
238// FeNeg sets h = -f
239//
240// Preconditions:
241// |f| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
242//
243// Postconditions:
244// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
245func FeNeg(h, f *FieldElement) {
246 h[0] = -f[0]
247 h[1] = -f[1]
248 h[2] = -f[2]
249 h[3] = -f[3]
250 h[4] = -f[4]
251 h[5] = -f[5]
252 h[6] = -f[6]
253 h[7] = -f[7]
254 h[8] = -f[8]
255 h[9] = -f[9]
256}
257
258func FeCombine(h *FieldElement, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9 int64) {
259 var c0, c1, c2, c3, c4, c5, c6, c7, c8, c9 int64
260
261 /*
262 |h0| <= (1.1*1.1*2^52*(1+19+19+19+19)+1.1*1.1*2^50*(38+38+38+38+38))
263 i.e. |h0| <= 1.2*2^59; narrower ranges for h2, h4, h6, h8
264 |h1| <= (1.1*1.1*2^51*(1+1+19+19+19+19+19+19+19+19))
265 i.e. |h1| <= 1.5*2^58; narrower ranges for h3, h5, h7, h9
266 */
267
268 c0 = (h0 + (1 << 25)) >> 26
269 h1 += c0
270 h0 -= c0 << 26
271 c4 = (h4 + (1 << 25)) >> 26
272 h5 += c4
273 h4 -= c4 << 26
274 /* |h0| <= 2^25 */
275 /* |h4| <= 2^25 */
276 /* |h1| <= 1.51*2^58 */
277 /* |h5| <= 1.51*2^58 */
278
279 c1 = (h1 + (1 << 24)) >> 25
280 h2 += c1
281 h1 -= c1 << 25
282 c5 = (h5 + (1 << 24)) >> 25
283 h6 += c5
284 h5 -= c5 << 25
285 /* |h1| <= 2^24; from now on fits into int32 */
286 /* |h5| <= 2^24; from now on fits into int32 */
287 /* |h2| <= 1.21*2^59 */
288 /* |h6| <= 1.21*2^59 */
289
290 c2 = (h2 + (1 << 25)) >> 26
291 h3 += c2
292 h2 -= c2 << 26
293 c6 = (h6 + (1 << 25)) >> 26
294 h7 += c6
295 h6 -= c6 << 26
296 /* |h2| <= 2^25; from now on fits into int32 unchanged */
297 /* |h6| <= 2^25; from now on fits into int32 unchanged */
298 /* |h3| <= 1.51*2^58 */
299 /* |h7| <= 1.51*2^58 */
300
301 c3 = (h3 + (1 << 24)) >> 25
302 h4 += c3
303 h3 -= c3 << 25
304 c7 = (h7 + (1 << 24)) >> 25
305 h8 += c7
306 h7 -= c7 << 25
307 /* |h3| <= 2^24; from now on fits into int32 unchanged */
308 /* |h7| <= 2^24; from now on fits into int32 unchanged */
309 /* |h4| <= 1.52*2^33 */
310 /* |h8| <= 1.52*2^33 */
311
312 c4 = (h4 + (1 << 25)) >> 26
313 h5 += c4
314 h4 -= c4 << 26
315 c8 = (h8 + (1 << 25)) >> 26
316 h9 += c8
317 h8 -= c8 << 26
318 /* |h4| <= 2^25; from now on fits into int32 unchanged */
319 /* |h8| <= 2^25; from now on fits into int32 unchanged */
320 /* |h5| <= 1.01*2^24 */
321 /* |h9| <= 1.51*2^58 */
322
323 c9 = (h9 + (1 << 24)) >> 25
324 h0 += c9 * 19
325 h9 -= c9 << 25
326 /* |h9| <= 2^24; from now on fits into int32 unchanged */
327 /* |h0| <= 1.8*2^37 */
328
329 c0 = (h0 + (1 << 25)) >> 26
330 h1 += c0
331 h0 -= c0 << 26
332 /* |h0| <= 2^25; from now on fits into int32 unchanged */
333 /* |h1| <= 1.01*2^24 */
334
335 h[0] = int32(h0)
336 h[1] = int32(h1)
337 h[2] = int32(h2)
338 h[3] = int32(h3)
339 h[4] = int32(h4)
340 h[5] = int32(h5)
341 h[6] = int32(h6)
342 h[7] = int32(h7)
343 h[8] = int32(h8)
344 h[9] = int32(h9)
345}
346
347// FeMul calculates h = f * g
348// Can overlap h with f or g.
349//
350// Preconditions:
351// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
352// |g| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
353//
354// Postconditions:
355// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
356//
357// Notes on implementation strategy:
358//
359// Using schoolbook multiplication.
360// Karatsuba would save a little in some cost models.
361//
362// Most multiplications by 2 and 19 are 32-bit precomputations;
363// cheaper than 64-bit postcomputations.
364//
365// There is one remaining multiplication by 19 in the carry chain;
366// one *19 precomputation can be merged into this,
367// but the resulting data flow is considerably less clean.
368//
369// There are 12 carries below.
370// 10 of them are 2-way parallelizable and vectorizable.
371// Can get away with 11 carries, but then data flow is much deeper.
372//
373// With tighter constraints on inputs, can squeeze carries into int32.
374func FeMul(h, f, g *FieldElement) {
375 f0 := int64(f[0])
376 f1 := int64(f[1])
377 f2 := int64(f[2])
378 f3 := int64(f[3])
379 f4 := int64(f[4])
380 f5 := int64(f[5])
381 f6 := int64(f[6])
382 f7 := int64(f[7])
383 f8 := int64(f[8])
384 f9 := int64(f[9])
385
386 f1_2 := int64(2 * f[1])
387 f3_2 := int64(2 * f[3])
388 f5_2 := int64(2 * f[5])
389 f7_2 := int64(2 * f[7])
390 f9_2 := int64(2 * f[9])
391
392 g0 := int64(g[0])
393 g1 := int64(g[1])
394 g2 := int64(g[2])
395 g3 := int64(g[3])
396 g4 := int64(g[4])
397 g5 := int64(g[5])
398 g6 := int64(g[6])
399 g7 := int64(g[7])
400 g8 := int64(g[8])
401 g9 := int64(g[9])
402
403 g1_19 := int64(19 * g[1]) /* 1.4*2^29 */
404 g2_19 := int64(19 * g[2]) /* 1.4*2^30; still ok */
405 g3_19 := int64(19 * g[3])
406 g4_19 := int64(19 * g[4])
407 g5_19 := int64(19 * g[5])
408 g6_19 := int64(19 * g[6])
409 g7_19 := int64(19 * g[7])
410 g8_19 := int64(19 * g[8])
411 g9_19 := int64(19 * g[9])
412
413 h0 := f0*g0 + f1_2*g9_19 + f2*g8_19 + f3_2*g7_19 + f4*g6_19 + f5_2*g5_19 + f6*g4_19 + f7_2*g3_19 + f8*g2_19 + f9_2*g1_19
414 h1 := f0*g1 + f1*g0 + f2*g9_19 + f3*g8_19 + f4*g7_19 + f5*g6_19 + f6*g5_19 + f7*g4_19 + f8*g3_19 + f9*g2_19
415 h2 := f0*g2 + f1_2*g1 + f2*g0 + f3_2*g9_19 + f4*g8_19 + f5_2*g7_19 + f6*g6_19 + f7_2*g5_19 + f8*g4_19 + f9_2*g3_19
416 h3 := f0*g3 + f1*g2 + f2*g1 + f3*g0 + f4*g9_19 + f5*g8_19 + f6*g7_19 + f7*g6_19 + f8*g5_19 + f9*g4_19
417 h4 := f0*g4 + f1_2*g3 + f2*g2 + f3_2*g1 + f4*g0 + f5_2*g9_19 + f6*g8_19 + f7_2*g7_19 + f8*g6_19 + f9_2*g5_19
418 h5 := f0*g5 + f1*g4 + f2*g3 + f3*g2 + f4*g1 + f5*g0 + f6*g9_19 + f7*g8_19 + f8*g7_19 + f9*g6_19
419 h6 := f0*g6 + f1_2*g5 + f2*g4 + f3_2*g3 + f4*g2 + f5_2*g1 + f6*g0 + f7_2*g9_19 + f8*g8_19 + f9_2*g7_19
420 h7 := f0*g7 + f1*g6 + f2*g5 + f3*g4 + f4*g3 + f5*g2 + f6*g1 + f7*g0 + f8*g9_19 + f9*g8_19
421 h8 := f0*g8 + f1_2*g7 + f2*g6 + f3_2*g5 + f4*g4 + f5_2*g3 + f6*g2 + f7_2*g1 + f8*g0 + f9_2*g9_19
422 h9 := f0*g9 + f1*g8 + f2*g7 + f3*g6 + f4*g5 + f5*g4 + f6*g3 + f7*g2 + f8*g1 + f9*g0
423
424 FeCombine(h, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9)
425}
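// Editor's note, not part of the vendored file: the *19 and *2 factors above
// follow from 2^255 ≡ 19 (mod 2^255-19). A product of limbs with weights 2^a
// and 2^b folds back as 19*2^(a+b-255) whenever a+b >= 255; for example,
// f[1]*g[9] has weight 2^(26+230) = 2*2^255 ≡ 2*19, which is exactly the
// f1_2*g9_19 term of h0.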
426
427func feSquare(f *FieldElement) (h0, h1, h2, h3, h4, h5, h6, h7, h8, h9 int64) {
428 f0 := int64(f[0])
429 f1 := int64(f[1])
430 f2 := int64(f[2])
431 f3 := int64(f[3])
432 f4 := int64(f[4])
433 f5 := int64(f[5])
434 f6 := int64(f[6])
435 f7 := int64(f[7])
436 f8 := int64(f[8])
437 f9 := int64(f[9])
438 f0_2 := int64(2 * f[0])
439 f1_2 := int64(2 * f[1])
440 f2_2 := int64(2 * f[2])
441 f3_2 := int64(2 * f[3])
442 f4_2 := int64(2 * f[4])
443 f5_2 := int64(2 * f[5])
444 f6_2 := int64(2 * f[6])
445 f7_2 := int64(2 * f[7])
446 f5_38 := 38 * f5 // 1.31*2^30
447 f6_19 := 19 * f6 // 1.31*2^30
448 f7_38 := 38 * f7 // 1.31*2^30
449 f8_19 := 19 * f8 // 1.31*2^30
450 f9_38 := 38 * f9 // 1.31*2^30
451
452 h0 = f0*f0 + f1_2*f9_38 + f2_2*f8_19 + f3_2*f7_38 + f4_2*f6_19 + f5*f5_38
453 h1 = f0_2*f1 + f2*f9_38 + f3_2*f8_19 + f4*f7_38 + f5_2*f6_19
454 h2 = f0_2*f2 + f1_2*f1 + f3_2*f9_38 + f4_2*f8_19 + f5_2*f7_38 + f6*f6_19
455 h3 = f0_2*f3 + f1_2*f2 + f4*f9_38 + f5_2*f8_19 + f6*f7_38
456 h4 = f0_2*f4 + f1_2*f3_2 + f2*f2 + f5_2*f9_38 + f6_2*f8_19 + f7*f7_38
457 h5 = f0_2*f5 + f1_2*f4 + f2_2*f3 + f6*f9_38 + f7_2*f8_19
458 h6 = f0_2*f6 + f1_2*f5_2 + f2_2*f4 + f3_2*f3 + f7_2*f9_38 + f8*f8_19
459 h7 = f0_2*f7 + f1_2*f6 + f2_2*f5 + f3_2*f4 + f8*f9_38
460 h8 = f0_2*f8 + f1_2*f7_2 + f2_2*f6 + f3_2*f5_2 + f4*f4 + f9*f9_38
461 h9 = f0_2*f9 + f1_2*f8 + f2_2*f7 + f3_2*f6 + f4_2*f5
462
463 return
464}
465
466// FeSquare calculates h = f*f. Can overlap h with f.
467//
468// Preconditions:
469// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
470//
471// Postconditions:
472// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
473func FeSquare(h, f *FieldElement) {
474 h0, h1, h2, h3, h4, h5, h6, h7, h8, h9 := feSquare(f)
475 FeCombine(h, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9)
476}
477
478// FeSquare2 sets h = 2 * f * f
479//
480// Can overlap h with f.
481//
482// Preconditions:
483// |f| bounded by 1.65*2^26,1.65*2^25,1.65*2^26,1.65*2^25,etc.
484//
485// Postconditions:
486// |h| bounded by 1.01*2^25,1.01*2^24,1.01*2^25,1.01*2^24,etc.
487// See fe_mul.c for discussion of implementation strategy.
488func FeSquare2(h, f *FieldElement) {
489 h0, h1, h2, h3, h4, h5, h6, h7, h8, h9 := feSquare(f)
490
491 h0 += h0
492 h1 += h1
493 h2 += h2
494 h3 += h3
495 h4 += h4
496 h5 += h5
497 h6 += h6
498 h7 += h7
499 h8 += h8
500 h9 += h9
501
502 FeCombine(h, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9)
503}
504
505func FeInvert(out, z *FieldElement) {
506 var t0, t1, t2, t3 FieldElement
507 var i int
508
509 FeSquare(&t0, z) // 2^1
510 FeSquare(&t1, &t0) // 2^2
511 for i = 1; i < 2; i++ { // 2^3
512 FeSquare(&t1, &t1)
513 }
514 FeMul(&t1, z, &t1) // 2^3 + 2^0
515 FeMul(&t0, &t0, &t1) // 2^3 + 2^1 + 2^0
516 FeSquare(&t2, &t0) // 2^4 + 2^2 + 2^1
517 FeMul(&t1, &t1, &t2) // 2^4 + 2^3 + 2^2 + 2^1 + 2^0
518 FeSquare(&t2, &t1) // 5,4,3,2,1
519 for i = 1; i < 5; i++ { // 9,8,7,6,5
520 FeSquare(&t2, &t2)
521 }
522 FeMul(&t1, &t2, &t1) // 9,8,7,6,5,4,3,2,1,0
523 FeSquare(&t2, &t1) // 10..1
524 for i = 1; i < 10; i++ { // 19..10
525 FeSquare(&t2, &t2)
526 }
527 FeMul(&t2, &t2, &t1) // 19..0
528 FeSquare(&t3, &t2) // 20..1
529 for i = 1; i < 20; i++ { // 39..20
530 FeSquare(&t3, &t3)
531 }
532 FeMul(&t2, &t3, &t2) // 39..0
533 FeSquare(&t2, &t2) // 40..1
534 for i = 1; i < 10; i++ { // 49..10
535 FeSquare(&t2, &t2)
536 }
537 FeMul(&t1, &t2, &t1) // 49..0
538 FeSquare(&t2, &t1) // 50..1
539 for i = 1; i < 50; i++ { // 99..50
540 FeSquare(&t2, &t2)
541 }
542 FeMul(&t2, &t2, &t1) // 99..0
543 FeSquare(&t3, &t2) // 100..1
544 for i = 1; i < 100; i++ { // 199..100
545 FeSquare(&t3, &t3)
546 }
547 FeMul(&t2, &t3, &t2) // 199..0
548 FeSquare(&t2, &t2) // 200..1
549 for i = 1; i < 50; i++ { // 249..50
550 FeSquare(&t2, &t2)
551 }
552 FeMul(&t1, &t2, &t1) // 249..0
553 FeSquare(&t1, &t1) // 250..1
554 for i = 1; i < 5; i++ { // 254..5
555 FeSquare(&t1, &t1)
556 }
557 FeMul(out, &t1, &t0) // 254..5,3,1,0
558}
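// Editor's note, not part of the vendored file: FeInvert computes z^(p-2) =
// z^(-1) for p = 2^255-19 by Fermat's little theorem; the exponent p-2 =
// 2^255-21 has exactly bits 254..5,3,1,0 set, matching the final comment
// above. A math/big cross-check of the same inverse (zBig being z converted
// to a *big.Int; both names illustrative) would be:
//
//	p := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 255), big.NewInt(19))
//	inv := new(big.Int).ModInverse(zBig, p)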
559
560func fePow22523(out, z *FieldElement) {
561 var t0, t1, t2 FieldElement
562 var i int
563
564 FeSquare(&t0, z)
565 for i = 1; i < 1; i++ {
566 FeSquare(&t0, &t0)
567 }
568 FeSquare(&t1, &t0)
569 for i = 1; i < 2; i++ {
570 FeSquare(&t1, &t1)
571 }
572 FeMul(&t1, z, &t1)
573 FeMul(&t0, &t0, &t1)
574 FeSquare(&t0, &t0)
575 for i = 1; i < 1; i++ {
576 FeSquare(&t0, &t0)
577 }
578 FeMul(&t0, &t1, &t0)
579 FeSquare(&t1, &t0)
580 for i = 1; i < 5; i++ {
581 FeSquare(&t1, &t1)
582 }
583 FeMul(&t0, &t1, &t0)
584 FeSquare(&t1, &t0)
585 for i = 1; i < 10; i++ {
586 FeSquare(&t1, &t1)
587 }
588 FeMul(&t1, &t1, &t0)
589 FeSquare(&t2, &t1)
590 for i = 1; i < 20; i++ {
591 FeSquare(&t2, &t2)
592 }
593 FeMul(&t1, &t2, &t1)
594 FeSquare(&t1, &t1)
595 for i = 1; i < 10; i++ {
596 FeSquare(&t1, &t1)
597 }
598 FeMul(&t0, &t1, &t0)
599 FeSquare(&t1, &t0)
600 for i = 1; i < 50; i++ {
601 FeSquare(&t1, &t1)
602 }
603 FeMul(&t1, &t1, &t0)
604 FeSquare(&t2, &t1)
605 for i = 1; i < 100; i++ {
606 FeSquare(&t2, &t2)
607 }
608 FeMul(&t1, &t2, &t1)
609 FeSquare(&t1, &t1)
610 for i = 1; i < 50; i++ {
611 FeSquare(&t1, &t1)
612 }
613 FeMul(&t0, &t1, &t0)
614 FeSquare(&t0, &t0)
615 for i = 1; i < 2; i++ {
616 FeSquare(&t0, &t0)
617 }
618 FeMul(out, &t0, z)
619}
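// Editor's note, not part of the vendored file: fePow22523 computes
// z^((p-5)/8) = z^(2^252-3). Since p = 2^255-19 ≡ 5 (mod 8), this power is
// the core of the square-root candidate x = u*v^3*(u*v^7)^((p-5)/8) that
// ExtendedGroupElement.FromBytes below uses to decompress points.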
620
621// Group elements are members of the elliptic curve -x^2 + y^2 = 1 + d * x^2 *
622// y^2 where d = -121665/121666.
623//
624// Several representations are used:
625// ProjectiveGroupElement: (X:Y:Z) satisfying x=X/Z, y=Y/Z
626// ExtendedGroupElement: (X:Y:Z:T) satisfying x=X/Z, y=Y/Z, XY=ZT
627// CompletedGroupElement: ((X:Z),(Y:T)) satisfying x=X/Z, y=Y/T
628// PreComputedGroupElement: (y+x,y-x,2dxy)
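// Editor's note, not part of the vendored file: the list above omits
// CachedGroupElement, which stores (Y+X,Y-X,Z,2dT) for an extended point (see
// ToCached), so geAdd and geSub need no per-addition precomputation. These
// systems correspond to the P^2, P^3 and P^1 x P^1 coordinates of Hisil,
// Wong, Carter and Dawson, "Twisted Edwards Curves Revisited".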
629
630type ProjectiveGroupElement struct {
631 X, Y, Z FieldElement
632}
633
634type ExtendedGroupElement struct {
635 X, Y, Z, T FieldElement
636}
637
638type CompletedGroupElement struct {
639 X, Y, Z, T FieldElement
640}
641
642type PreComputedGroupElement struct {
643 yPlusX, yMinusX, xy2d FieldElement
644}
645
646type CachedGroupElement struct {
647 yPlusX, yMinusX, Z, T2d FieldElement
648}
649
650func (p *ProjectiveGroupElement) Zero() {
651 FeZero(&p.X)
652 FeOne(&p.Y)
653 FeOne(&p.Z)
654}
655
656func (p *ProjectiveGroupElement) Double(r *CompletedGroupElement) {
657 var t0 FieldElement
658
659 FeSquare(&r.X, &p.X)
660 FeSquare(&r.Z, &p.Y)
661 FeSquare2(&r.T, &p.Z)
662 FeAdd(&r.Y, &p.X, &p.Y)
663 FeSquare(&t0, &r.Y)
664 FeAdd(&r.Y, &r.Z, &r.X)
665 FeSub(&r.Z, &r.Z, &r.X)
666 FeSub(&r.X, &t0, &r.Y)
667 FeSub(&r.T, &r.T, &r.Z)
668}
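// Editor's note, not part of the vendored file: this doubling costs four
// field squarings (one via FeSquare2) and no general multiplications; the
// CompletedGroupElement result is mapped back to projective or extended
// coordinates with three or four multiplications by the To* methods below.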
669
670func (p *ProjectiveGroupElement) ToBytes(s *[32]byte) {
671 var recip, x, y FieldElement
672
673 FeInvert(&recip, &p.Z)
674 FeMul(&x, &p.X, &recip)
675 FeMul(&y, &p.Y, &recip)
676 FeToBytes(s, &y)
677 s[31] ^= FeIsNegative(&x) << 7
678}
679
680func (p *ExtendedGroupElement) Zero() {
681 FeZero(&p.X)
682 FeOne(&p.Y)
683 FeOne(&p.Z)
684 FeZero(&p.T)
685}
686
687func (p *ExtendedGroupElement) Double(r *CompletedGroupElement) {
688 var q ProjectiveGroupElement
689 p.ToProjective(&q)
690 q.Double(r)
691}
692
693func (p *ExtendedGroupElement) ToCached(r *CachedGroupElement) {
694 FeAdd(&r.yPlusX, &p.Y, &p.X)
695 FeSub(&r.yMinusX, &p.Y, &p.X)
696 FeCopy(&r.Z, &p.Z)
697 FeMul(&r.T2d, &p.T, &d2)
698}
699
700func (p *ExtendedGroupElement) ToProjective(r *ProjectiveGroupElement) {
701 FeCopy(&r.X, &p.X)
702 FeCopy(&r.Y, &p.Y)
703 FeCopy(&r.Z, &p.Z)
704}
705
706func (p *ExtendedGroupElement) ToBytes(s *[32]byte) {
707 var recip, x, y FieldElement
708
709 FeInvert(&recip, &p.Z)
710 FeMul(&x, &p.X, &recip)
711 FeMul(&y, &p.Y, &recip)
712 FeToBytes(s, &y)
713 s[31] ^= FeIsNegative(&x) << 7
714}
715
716func (p *ExtendedGroupElement) FromBytes(s *[32]byte) bool {
717 var u, v, v3, vxx, check FieldElement
718
719 FeFromBytes(&p.Y, s)
720 FeOne(&p.Z)
721 FeSquare(&u, &p.Y)
722 FeMul(&v, &u, &d)
723	FeSub(&u, &u, &p.Z) // u = y^2-1
724 FeAdd(&v, &v, &p.Z) // v = dy^2+1
725
726 FeSquare(&v3, &v)
727 FeMul(&v3, &v3, &v) // v3 = v^3
728 FeSquare(&p.X, &v3)
729 FeMul(&p.X, &p.X, &v)
730 FeMul(&p.X, &p.X, &u) // x = uv^7
731
732 fePow22523(&p.X, &p.X) // x = (uv^7)^((q-5)/8)
733 FeMul(&p.X, &p.X, &v3)
734 FeMul(&p.X, &p.X, &u) // x = uv^3(uv^7)^((q-5)/8)
735
736 var tmpX, tmp2 [32]byte
737
738 FeSquare(&vxx, &p.X)
739 FeMul(&vxx, &vxx, &v)
740 FeSub(&check, &vxx, &u) // vx^2-u
741 if FeIsNonZero(&check) == 1 {
742 FeAdd(&check, &vxx, &u) // vx^2+u
743 if FeIsNonZero(&check) == 1 {
744 return false
745 }
746 FeMul(&p.X, &p.X, &SqrtM1)
747
748 FeToBytes(&tmpX, &p.X)
749 for i, v := range tmpX {
750 tmp2[31-i] = v
751 }
752 }
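	// Editor's note, not part of the vendored file: tmpX and tmp2 above are
	// written but never read; this appears to be leftover debugging code,
	// present in the upstream x/crypto source as well.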
753
754 if FeIsNegative(&p.X) != (s[31] >> 7) {
755 FeNeg(&p.X, &p.X)
756 }
757
758 FeMul(&p.T, &p.X, &p.Y)
759 return true
760}
761
762func (p *CompletedGroupElement) ToProjective(r *ProjectiveGroupElement) {
763 FeMul(&r.X, &p.X, &p.T)
764 FeMul(&r.Y, &p.Y, &p.Z)
765 FeMul(&r.Z, &p.Z, &p.T)
766}
767
768func (p *CompletedGroupElement) ToExtended(r *ExtendedGroupElement) {
769 FeMul(&r.X, &p.X, &p.T)
770 FeMul(&r.Y, &p.Y, &p.Z)
771 FeMul(&r.Z, &p.Z, &p.T)
772 FeMul(&r.T, &p.X, &p.Y)
773}
774
775func (p *PreComputedGroupElement) Zero() {
776 FeOne(&p.yPlusX)
777 FeOne(&p.yMinusX)
778 FeZero(&p.xy2d)
779}
780
781func geAdd(r *CompletedGroupElement, p *ExtendedGroupElement, q *CachedGroupElement) {
782 var t0 FieldElement
783
784 FeAdd(&r.X, &p.Y, &p.X)
785 FeSub(&r.Y, &p.Y, &p.X)
786 FeMul(&r.Z, &r.X, &q.yPlusX)
787 FeMul(&r.Y, &r.Y, &q.yMinusX)
788 FeMul(&r.T, &q.T2d, &p.T)
789 FeMul(&r.X, &p.Z, &q.Z)
790 FeAdd(&t0, &r.X, &r.X)
791 FeSub(&r.X, &r.Z, &r.Y)
792 FeAdd(&r.Y, &r.Z, &r.Y)
793 FeAdd(&r.Z, &t0, &r.T)
794 FeSub(&r.T, &t0, &r.T)
795}
796
797func geSub(r *CompletedGroupElement, p *ExtendedGroupElement, q *CachedGroupElement) {
798 var t0 FieldElement
799
800 FeAdd(&r.X, &p.Y, &p.X)
801 FeSub(&r.Y, &p.Y, &p.X)
802 FeMul(&r.Z, &r.X, &q.yMinusX)
803 FeMul(&r.Y, &r.Y, &q.yPlusX)
804 FeMul(&r.T, &q.T2d, &p.T)
805 FeMul(&r.X, &p.Z, &q.Z)
806 FeAdd(&t0, &r.X, &r.X)
807 FeSub(&r.X, &r.Z, &r.Y)
808 FeAdd(&r.Y, &r.Z, &r.Y)
809 FeSub(&r.Z, &t0, &r.T)
810 FeAdd(&r.T, &t0, &r.T)
811}
812
813func geMixedAdd(r *CompletedGroupElement, p *ExtendedGroupElement, q *PreComputedGroupElement) {
814 var t0 FieldElement
815
816 FeAdd(&r.X, &p.Y, &p.X)
817 FeSub(&r.Y, &p.Y, &p.X)
818 FeMul(&r.Z, &r.X, &q.yPlusX)
819 FeMul(&r.Y, &r.Y, &q.yMinusX)
820 FeMul(&r.T, &q.xy2d, &p.T)
821 FeAdd(&t0, &p.Z, &p.Z)
822 FeSub(&r.X, &r.Z, &r.Y)
823 FeAdd(&r.Y, &r.Z, &r.Y)
824 FeAdd(&r.Z, &t0, &r.T)
825 FeSub(&r.T, &t0, &r.T)
826}
827
828func geMixedSub(r *CompletedGroupElement, p *ExtendedGroupElement, q *PreComputedGroupElement) {
829 var t0 FieldElement
830
831 FeAdd(&r.X, &p.Y, &p.X)
832 FeSub(&r.Y, &p.Y, &p.X)
833 FeMul(&r.Z, &r.X, &q.yMinusX)
834 FeMul(&r.Y, &r.Y, &q.yPlusX)
835 FeMul(&r.T, &q.xy2d, &p.T)
836 FeAdd(&t0, &p.Z, &p.Z)
837 FeSub(&r.X, &r.Z, &r.Y)
838 FeAdd(&r.Y, &r.Z, &r.Y)
839 FeSub(&r.Z, &t0, &r.T)
840 FeAdd(&r.T, &t0, &r.T)
841}
842
843func slide(r *[256]int8, a *[32]byte) {
844 for i := range r {
845 r[i] = int8(1 & (a[i>>3] >> uint(i&7)))
846 }
847
848 for i := range r {
849 if r[i] != 0 {
850 for b := 1; b <= 6 && i+b < 256; b++ {
851 if r[i+b] != 0 {
852 if r[i]+(r[i+b]<<uint(b)) <= 15 {
853 r[i] += r[i+b] << uint(b)
854 r[i+b] = 0
855 } else if r[i]-(r[i+b]<<uint(b)) >= -15 {
856 r[i] -= r[i+b] << uint(b)
857 for k := i + b; k < 256; k++ {
858 if r[k] == 0 {
859 r[k] = 1
860 break
861 }
862 r[k] = 0
863 }
864 } else {
865 break
866 }
867 }
868 }
869 }
870 }
871}
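// slide recodes a into a signed sliding-window form: every nonzero digit of r
// is odd and in [-15, 15], and the digits satisfy sum(r[i] * 2^i) == a when a
// is read as a little-endian integer below 2^255. A minimal sanity-check
// sketch using math/big (hypothetical test code, not part of this package):
//
//	var r [256]int8
//	var a [32]byte
//	a[0], a[1] = 0xd5, 0x7a // arbitrary test scalar, value 0x7ad5
//	slide(&r, &a)
//	got := new(big.Int)
//	for i := 255; i >= 0; i-- {
//		got.Lsh(got, 1)                       // got *= 2
//		got.Add(got, big.NewInt(int64(r[i]))) // got += r[i]
//	}
//	// got.Int64() == 0x7ad5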
872
873// GeDoubleScalarMultVartime sets r = a*A + b*B
874// where a = a[0]+256*a[1]+...+256^31 a[31],
875// and b = b[0]+256*b[1]+...+256^31 b[31].
876// B is the Ed25519 base point (x,4/5) with x positive.
877func GeDoubleScalarMultVartime(r *ProjectiveGroupElement, a *[32]byte, A *ExtendedGroupElement, b *[32]byte) {
878 var aSlide, bSlide [256]int8
879 var Ai [8]CachedGroupElement // A,3A,5A,7A,9A,11A,13A,15A
880 var t CompletedGroupElement
881 var u, A2 ExtendedGroupElement
882 var i int
883
884 slide(&aSlide, a)
885 slide(&bSlide, b)
886
887 A.ToCached(&Ai[0])
888 A.Double(&t)
889 t.ToExtended(&A2)
890
891 for i := 0; i < 7; i++ {
892 geAdd(&t, &A2, &Ai[i])
893 t.ToExtended(&u)
894 u.ToCached(&Ai[i+1])
895 }
896
897 r.Zero()
898
899 for i = 255; i >= 0; i-- {
900 if aSlide[i] != 0 || bSlide[i] != 0 {
901 break
902 }
903 }
904
905 for ; i >= 0; i-- {
906 r.Double(&t)
907
908 if aSlide[i] > 0 {
909 t.ToExtended(&u)
910 geAdd(&t, &u, &Ai[aSlide[i]/2])
911 } else if aSlide[i] < 0 {
912 t.ToExtended(&u)
913 geSub(&t, &u, &Ai[(-aSlide[i])/2])
914 }
915
916 if bSlide[i] > 0 {
917 t.ToExtended(&u)
918 geMixedAdd(&t, &u, &bi[bSlide[i]/2])
919 } else if bSlide[i] < 0 {
920 t.ToExtended(&u)
921 geMixedSub(&t, &u, &bi[(-bSlide[i])/2])
922 }
923
924 t.ToProjective(r)
925 }
926}
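// The Vartime suffix is a warning rather than an oversight: the running time
// depends on the inputs, so this is only safe when a, A and b are public. In
// Ed25519 that is signature verification, where both scalars are derived from
// the signature and the hashed message.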
927
928// equal returns 1 if b == c and 0 otherwise, assuming that b and c are
929// non-negative.
930func equal(b, c int32) int32 {
931 x := uint32(b ^ c)
932 x--
933 return int32(x >> 31)
934}
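// When b == c, x is 0 and the decrement wraps around to 0xffffffff, whose top
// bit is 1. When b != c (and both are non-negative), x lies in [1, 2^31-1],
// so x-1 keeps a clear top bit. Either way the result is computed without
// branching on the (possibly secret) inputs.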
935
936// negative returns 1 if b < 0 and 0 otherwise.
937func negative(b int32) int32 {
938 return (b >> 31) & 1
939}
940
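// PreComputedGroupElementCMove replaces t with u when b == 1 and leaves t
// unchanged when b == 0, using only constant-time conditional moves.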
941func PreComputedGroupElementCMove(t, u *PreComputedGroupElement, b int32) {
942 FeCMove(&t.yPlusX, &u.yPlusX, b)
943 FeCMove(&t.yMinusX, &u.yMinusX, b)
944 FeCMove(&t.xy2d, &u.xy2d, b)
945}
946
947func selectPoint(t *PreComputedGroupElement, pos int32, b int32) {
948 var minusT PreComputedGroupElement
949 bNegative := negative(b)
950 bAbs := b - (((-bNegative) & b) << 1)
951
952 t.Zero()
953 for i := int32(0); i < 8; i++ {
954 PreComputedGroupElementCMove(t, &base[pos][i], equal(bAbs, i+1))
955 }
956 FeCopy(&minusT.yPlusX, &t.yMinusX)
957 FeCopy(&minusT.yMinusX, &t.yPlusX)
958 FeNeg(&minusT.xy2d, &t.xy2d)
959 PreComputedGroupElementCMove(t, &minusT, bNegative)
960}
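// selectPoint loads t from base[pos] in constant time: it touches all eight
// table entries and keeps the wanted one via conditional moves, so the memory
// access pattern is independent of the secret digit b. The table holds only
// positive multiples; a negative digit selects |b| and then conditionally
// negates the point, which for the (y+x, y-x, 2dxy) representation means
// swapping yPlusX with yMinusX and negating xy2d.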
961
962// GeScalarMultBase computes h = a*B, where
963// a = a[0]+256*a[1]+...+256^31 a[31]
964// B is the Ed25519 base point (x,4/5) with x positive.
965//
966// Preconditions:
967// a[31] <= 127
968func GeScalarMultBase(h *ExtendedGroupElement, a *[32]byte) {
969 var e [64]int8
970
971 for i, v := range a {
972 e[2*i] = int8(v & 15)
973 e[2*i+1] = int8((v >> 4) & 15)
974 }
975
976 // each e[i] is between 0 and 15 and e[63] is between 0 and 7.
977
978 carry := int8(0)
979 for i := 0; i < 63; i++ {
980 e[i] += carry
981 carry = (e[i] + 8) >> 4
982 e[i] -= carry << 4
983 }
984 e[63] += carry
985 // each e[i] is between -8 and 8.
986
987 h.Zero()
988 var t PreComputedGroupElement
989 var r CompletedGroupElement
990 for i := int32(1); i < 64; i += 2 {
991 selectPoint(&t, i/2, int32(e[i]))
992 geMixedAdd(&r, h, &t)
993 r.ToExtended(h)
994 }
995
996 var s ProjectiveGroupElement
997
998 h.Double(&r)
999 r.ToProjective(&s)
1000 s.Double(&r)
1001 r.ToProjective(&s)
1002 s.Double(&r)
1003 r.ToProjective(&s)
1004 s.Double(&r)
1005 r.ToExtended(h)
1006
1007 for i := int32(0); i < 64; i += 2 {
1008 selectPoint(&t, i/2, int32(e[i]))
1009 geMixedAdd(&r, h, &t)
1010 r.ToExtended(h)
1011 }
1012}
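// The digits e[i] above form a signed radix-16 decomposition:
// a = e[0] + e[1]*16 + ... + e[63]*16^63 with each e[i] in [-8, 8]. The first
// loop adds the odd-indexed digits from the precomputed table, the four
// doublings multiply that partial sum by 16, and the second loop adds the
// even-indexed digits, so h ends up as sum(e[i] * 16^i) * B = a*B.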
1013
1014// The scalars are GF(2^252 + 27742317777372353535851937790883648493).
1015
1016// Input:
1017// a[0]+256*a[1]+...+256^31*a[31] = a
1018// b[0]+256*b[1]+...+256^31*b[31] = b
1019// c[0]+256*c[1]+...+256^31*c[31] = c
1020//
1021// Output:
1022// s[0]+256*s[1]+...+256^31*s[31] = (ab+c) mod l
1023// where l = 2^252 + 27742317777372353535851937790883648493.
1024func ScMulAdd(s, a, b, c *[32]byte) {
1025 a0 := 2097151 & load3(a[:])
1026 a1 := 2097151 & (load4(a[2:]) >> 5)
1027 a2 := 2097151 & (load3(a[5:]) >> 2)
1028 a3 := 2097151 & (load4(a[7:]) >> 7)
1029 a4 := 2097151 & (load4(a[10:]) >> 4)
1030 a5 := 2097151 & (load3(a[13:]) >> 1)
1031 a6 := 2097151 & (load4(a[15:]) >> 6)
1032 a7 := 2097151 & (load3(a[18:]) >> 3)
1033 a8 := 2097151 & load3(a[21:])
1034 a9 := 2097151 & (load4(a[23:]) >> 5)
1035 a10 := 2097151 & (load3(a[26:]) >> 2)
1036 a11 := (load4(a[28:]) >> 7)
1037 b0 := 2097151 & load3(b[:])
1038 b1 := 2097151 & (load4(b[2:]) >> 5)
1039 b2 := 2097151 & (load3(b[5:]) >> 2)
1040 b3 := 2097151 & (load4(b[7:]) >> 7)
1041 b4 := 2097151 & (load4(b[10:]) >> 4)
1042 b5 := 2097151 & (load3(b[13:]) >> 1)
1043 b6 := 2097151 & (load4(b[15:]) >> 6)
1044 b7 := 2097151 & (load3(b[18:]) >> 3)
1045 b8 := 2097151 & load3(b[21:])
1046 b9 := 2097151 & (load4(b[23:]) >> 5)
1047 b10 := 2097151 & (load3(b[26:]) >> 2)
1048 b11 := (load4(b[28:]) >> 7)
1049 c0 := 2097151 & load3(c[:])
1050 c1 := 2097151 & (load4(c[2:]) >> 5)
1051 c2 := 2097151 & (load3(c[5:]) >> 2)
1052 c3 := 2097151 & (load4(c[7:]) >> 7)
1053 c4 := 2097151 & (load4(c[10:]) >> 4)
1054 c5 := 2097151 & (load3(c[13:]) >> 1)
1055 c6 := 2097151 & (load4(c[15:]) >> 6)
1056 c7 := 2097151 & (load3(c[18:]) >> 3)
1057 c8 := 2097151 & load3(c[21:])
1058 c9 := 2097151 & (load4(c[23:]) >> 5)
1059 c10 := 2097151 & (load3(c[26:]) >> 2)
1060 c11 := (load4(c[28:]) >> 7)
1061 var carry [23]int64
1062
1063 s0 := c0 + a0*b0
1064 s1 := c1 + a0*b1 + a1*b0
1065 s2 := c2 + a0*b2 + a1*b1 + a2*b0
1066 s3 := c3 + a0*b3 + a1*b2 + a2*b1 + a3*b0
1067 s4 := c4 + a0*b4 + a1*b3 + a2*b2 + a3*b1 + a4*b0
1068 s5 := c5 + a0*b5 + a1*b4 + a2*b3 + a3*b2 + a4*b1 + a5*b0
1069 s6 := c6 + a0*b6 + a1*b5 + a2*b4 + a3*b3 + a4*b2 + a5*b1 + a6*b0
1070 s7 := c7 + a0*b7 + a1*b6 + a2*b5 + a3*b4 + a4*b3 + a5*b2 + a6*b1 + a7*b0
1071 s8 := c8 + a0*b8 + a1*b7 + a2*b6 + a3*b5 + a4*b4 + a5*b3 + a6*b2 + a7*b1 + a8*b0
1072 s9 := c9 + a0*b9 + a1*b8 + a2*b7 + a3*b6 + a4*b5 + a5*b4 + a6*b3 + a7*b2 + a8*b1 + a9*b0
1073 s10 := c10 + a0*b10 + a1*b9 + a2*b8 + a3*b7 + a4*b6 + a5*b5 + a6*b4 + a7*b3 + a8*b2 + a9*b1 + a10*b0
1074 s11 := c11 + a0*b11 + a1*b10 + a2*b9 + a3*b8 + a4*b7 + a5*b6 + a6*b5 + a7*b4 + a8*b3 + a9*b2 + a10*b1 + a11*b0
1075 s12 := a1*b11 + a2*b10 + a3*b9 + a4*b8 + a5*b7 + a6*b6 + a7*b5 + a8*b4 + a9*b3 + a10*b2 + a11*b1
1076 s13 := a2*b11 + a3*b10 + a4*b9 + a5*b8 + a6*b7 + a7*b6 + a8*b5 + a9*b4 + a10*b3 + a11*b2
1077 s14 := a3*b11 + a4*b10 + a5*b9 + a6*b8 + a7*b7 + a8*b6 + a9*b5 + a10*b4 + a11*b3
1078 s15 := a4*b11 + a5*b10 + a6*b9 + a7*b8 + a8*b7 + a9*b6 + a10*b5 + a11*b4
1079 s16 := a5*b11 + a6*b10 + a7*b9 + a8*b8 + a9*b7 + a10*b6 + a11*b5
1080 s17 := a6*b11 + a7*b10 + a8*b9 + a9*b8 + a10*b7 + a11*b6
1081 s18 := a7*b11 + a8*b10 + a9*b9 + a10*b8 + a11*b7
1082 s19 := a8*b11 + a9*b10 + a10*b9 + a11*b8
1083 s20 := a9*b11 + a10*b10 + a11*b9
1084 s21 := a10*b11 + a11*b10
1085 s22 := a11 * b11
1086 s23 := int64(0)
1087
1088 carry[0] = (s0 + (1 << 20)) >> 21
1089 s1 += carry[0]
1090 s0 -= carry[0] << 21
1091 carry[2] = (s2 + (1 << 20)) >> 21
1092 s3 += carry[2]
1093 s2 -= carry[2] << 21
1094 carry[4] = (s4 + (1 << 20)) >> 21
1095 s5 += carry[4]
1096 s4 -= carry[4] << 21
1097 carry[6] = (s6 + (1 << 20)) >> 21
1098 s7 += carry[6]
1099 s6 -= carry[6] << 21
1100 carry[8] = (s8 + (1 << 20)) >> 21
1101 s9 += carry[8]
1102 s8 -= carry[8] << 21
1103 carry[10] = (s10 + (1 << 20)) >> 21
1104 s11 += carry[10]
1105 s10 -= carry[10] << 21
1106 carry[12] = (s12 + (1 << 20)) >> 21
1107 s13 += carry[12]
1108 s12 -= carry[12] << 21
1109 carry[14] = (s14 + (1 << 20)) >> 21
1110 s15 += carry[14]
1111 s14 -= carry[14] << 21
1112 carry[16] = (s16 + (1 << 20)) >> 21
1113 s17 += carry[16]
1114 s16 -= carry[16] << 21
1115 carry[18] = (s18 + (1 << 20)) >> 21
1116 s19 += carry[18]
1117 s18 -= carry[18] << 21
1118 carry[20] = (s20 + (1 << 20)) >> 21
1119 s21 += carry[20]
1120 s20 -= carry[20] << 21
1121 carry[22] = (s22 + (1 << 20)) >> 21
1122 s23 += carry[22]
1123 s22 -= carry[22] << 21
1124
1125 carry[1] = (s1 + (1 << 20)) >> 21
1126 s2 += carry[1]
1127 s1 -= carry[1] << 21
1128 carry[3] = (s3 + (1 << 20)) >> 21
1129 s4 += carry[3]
1130 s3 -= carry[3] << 21
1131 carry[5] = (s5 + (1 << 20)) >> 21
1132 s6 += carry[5]
1133 s5 -= carry[5] << 21
1134 carry[7] = (s7 + (1 << 20)) >> 21
1135 s8 += carry[7]
1136 s7 -= carry[7] << 21
1137 carry[9] = (s9 + (1 << 20)) >> 21
1138 s10 += carry[9]
1139 s9 -= carry[9] << 21
1140 carry[11] = (s11 + (1 << 20)) >> 21
1141 s12 += carry[11]
1142 s11 -= carry[11] << 21
1143 carry[13] = (s13 + (1 << 20)) >> 21
1144 s14 += carry[13]
1145 s13 -= carry[13] << 21
1146 carry[15] = (s15 + (1 << 20)) >> 21
1147 s16 += carry[15]
1148 s15 -= carry[15] << 21
1149 carry[17] = (s17 + (1 << 20)) >> 21
1150 s18 += carry[17]
1151 s17 -= carry[17] << 21
1152 carry[19] = (s19 + (1 << 20)) >> 21
1153 s20 += carry[19]
1154 s19 -= carry[19] << 21
1155 carry[21] = (s21 + (1 << 20)) >> 21
1156 s22 += carry[21]
1157 s21 -= carry[21] << 21
1158
1159 s11 += s23 * 666643
1160 s12 += s23 * 470296
1161 s13 += s23 * 654183
1162 s14 -= s23 * 997805
1163 s15 += s23 * 136657
1164 s16 -= s23 * 683901
1165 s23 = 0
1166
1167 s10 += s22 * 666643
1168 s11 += s22 * 470296
1169 s12 += s22 * 654183
1170 s13 -= s22 * 997805
1171 s14 += s22 * 136657
1172 s15 -= s22 * 683901
1173 s22 = 0
1174
1175 s9 += s21 * 666643
1176 s10 += s21 * 470296
1177 s11 += s21 * 654183
1178 s12 -= s21 * 997805
1179 s13 += s21 * 136657
1180 s14 -= s21 * 683901
1181 s21 = 0
1182
1183 s8 += s20 * 666643
1184 s9 += s20 * 470296
1185 s10 += s20 * 654183
1186 s11 -= s20 * 997805
1187 s12 += s20 * 136657
1188 s13 -= s20 * 683901
1189 s20 = 0
1190
1191 s7 += s19 * 666643
1192 s8 += s19 * 470296
1193 s9 += s19 * 654183
1194 s10 -= s19 * 997805
1195 s11 += s19 * 136657
1196 s12 -= s19 * 683901
1197 s19 = 0
1198
1199 s6 += s18 * 666643
1200 s7 += s18 * 470296
1201 s8 += s18 * 654183
1202 s9 -= s18 * 997805
1203 s10 += s18 * 136657
1204 s11 -= s18 * 683901
1205 s18 = 0
1206
1207 carry[6] = (s6 + (1 << 20)) >> 21
1208 s7 += carry[6]
1209 s6 -= carry[6] << 21
1210 carry[8] = (s8 + (1 << 20)) >> 21
1211 s9 += carry[8]
1212 s8 -= carry[8] << 21
1213 carry[10] = (s10 + (1 << 20)) >> 21
1214 s11 += carry[10]
1215 s10 -= carry[10] << 21
1216 carry[12] = (s12 + (1 << 20)) >> 21
1217 s13 += carry[12]
1218 s12 -= carry[12] << 21
1219 carry[14] = (s14 + (1 << 20)) >> 21
1220 s15 += carry[14]
1221 s14 -= carry[14] << 21
1222 carry[16] = (s16 + (1 << 20)) >> 21
1223 s17 += carry[16]
1224 s16 -= carry[16] << 21
1225
1226 carry[7] = (s7 + (1 << 20)) >> 21
1227 s8 += carry[7]
1228 s7 -= carry[7] << 21
1229 carry[9] = (s9 + (1 << 20)) >> 21
1230 s10 += carry[9]
1231 s9 -= carry[9] << 21
1232 carry[11] = (s11 + (1 << 20)) >> 21
1233 s12 += carry[11]
1234 s11 -= carry[11] << 21
1235 carry[13] = (s13 + (1 << 20)) >> 21
1236 s14 += carry[13]
1237 s13 -= carry[13] << 21
1238 carry[15] = (s15 + (1 << 20)) >> 21
1239 s16 += carry[15]
1240 s15 -= carry[15] << 21
1241
1242 s5 += s17 * 666643
1243 s6 += s17 * 470296
1244 s7 += s17 * 654183
1245 s8 -= s17 * 997805
1246 s9 += s17 * 136657
1247 s10 -= s17 * 683901
1248 s17 = 0
1249
1250 s4 += s16 * 666643
1251 s5 += s16 * 470296
1252 s6 += s16 * 654183
1253 s7 -= s16 * 997805
1254 s8 += s16 * 136657
1255 s9 -= s16 * 683901
1256 s16 = 0
1257
1258 s3 += s15 * 666643
1259 s4 += s15 * 470296
1260 s5 += s15 * 654183
1261 s6 -= s15 * 997805
1262 s7 += s15 * 136657
1263 s8 -= s15 * 683901
1264 s15 = 0
1265
1266 s2 += s14 * 666643
1267 s3 += s14 * 470296
1268 s4 += s14 * 654183
1269 s5 -= s14 * 997805
1270 s6 += s14 * 136657
1271 s7 -= s14 * 683901
1272 s14 = 0
1273
1274 s1 += s13 * 666643
1275 s2 += s13 * 470296
1276 s3 += s13 * 654183
1277 s4 -= s13 * 997805
1278 s5 += s13 * 136657
1279 s6 -= s13 * 683901
1280 s13 = 0
1281
1282 s0 += s12 * 666643
1283 s1 += s12 * 470296
1284 s2 += s12 * 654183
1285 s3 -= s12 * 997805
1286 s4 += s12 * 136657
1287 s5 -= s12 * 683901
1288 s12 = 0
1289
1290 carry[0] = (s0 + (1 << 20)) >> 21
1291 s1 += carry[0]
1292 s0 -= carry[0] << 21
1293 carry[2] = (s2 + (1 << 20)) >> 21
1294 s3 += carry[2]
1295 s2 -= carry[2] << 21
1296 carry[4] = (s4 + (1 << 20)) >> 21
1297 s5 += carry[4]
1298 s4 -= carry[4] << 21
1299 carry[6] = (s6 + (1 << 20)) >> 21
1300 s7 += carry[6]
1301 s6 -= carry[6] << 21
1302 carry[8] = (s8 + (1 << 20)) >> 21
1303 s9 += carry[8]
1304 s8 -= carry[8] << 21
1305 carry[10] = (s10 + (1 << 20)) >> 21
1306 s11 += carry[10]
1307 s10 -= carry[10] << 21
1308
1309 carry[1] = (s1 + (1 << 20)) >> 21
1310 s2 += carry[1]
1311 s1 -= carry[1] << 21
1312 carry[3] = (s3 + (1 << 20)) >> 21
1313 s4 += carry[3]
1314 s3 -= carry[3] << 21
1315 carry[5] = (s5 + (1 << 20)) >> 21
1316 s6 += carry[5]
1317 s5 -= carry[5] << 21
1318 carry[7] = (s7 + (1 << 20)) >> 21
1319 s8 += carry[7]
1320 s7 -= carry[7] << 21
1321 carry[9] = (s9 + (1 << 20)) >> 21
1322 s10 += carry[9]
1323 s9 -= carry[9] << 21
1324 carry[11] = (s11 + (1 << 20)) >> 21
1325 s12 += carry[11]
1326 s11 -= carry[11] << 21
1327
1328 s0 += s12 * 666643
1329 s1 += s12 * 470296
1330 s2 += s12 * 654183
1331 s3 -= s12 * 997805
1332 s4 += s12 * 136657
1333 s5 -= s12 * 683901
1334 s12 = 0
1335
1336 carry[0] = s0 >> 21
1337 s1 += carry[0]
1338 s0 -= carry[0] << 21
1339 carry[1] = s1 >> 21
1340 s2 += carry[1]
1341 s1 -= carry[1] << 21
1342 carry[2] = s2 >> 21
1343 s3 += carry[2]
1344 s2 -= carry[2] << 21
1345 carry[3] = s3 >> 21
1346 s4 += carry[3]
1347 s3 -= carry[3] << 21
1348 carry[4] = s4 >> 21
1349 s5 += carry[4]
1350 s4 -= carry[4] << 21
1351 carry[5] = s5 >> 21
1352 s6 += carry[5]
1353 s5 -= carry[5] << 21
1354 carry[6] = s6 >> 21
1355 s7 += carry[6]
1356 s6 -= carry[6] << 21
1357 carry[7] = s7 >> 21
1358 s8 += carry[7]
1359 s7 -= carry[7] << 21
1360 carry[8] = s8 >> 21
1361 s9 += carry[8]
1362 s8 -= carry[8] << 21
1363 carry[9] = s9 >> 21
1364 s10 += carry[9]
1365 s9 -= carry[9] << 21
1366 carry[10] = s10 >> 21
1367 s11 += carry[10]
1368 s10 -= carry[10] << 21
1369 carry[11] = s11 >> 21
1370 s12 += carry[11]
1371 s11 -= carry[11] << 21
1372
1373 s0 += s12 * 666643
1374 s1 += s12 * 470296
1375 s2 += s12 * 654183
1376 s3 -= s12 * 997805
1377 s4 += s12 * 136657
1378 s5 -= s12 * 683901
1379 s12 = 0
1380
1381 carry[0] = s0 >> 21
1382 s1 += carry[0]
1383 s0 -= carry[0] << 21
1384 carry[1] = s1 >> 21
1385 s2 += carry[1]
1386 s1 -= carry[1] << 21
1387 carry[2] = s2 >> 21
1388 s3 += carry[2]
1389 s2 -= carry[2] << 21
1390 carry[3] = s3 >> 21
1391 s4 += carry[3]
1392 s3 -= carry[3] << 21
1393 carry[4] = s4 >> 21
1394 s5 += carry[4]
1395 s4 -= carry[4] << 21
1396 carry[5] = s5 >> 21
1397 s6 += carry[5]
1398 s5 -= carry[5] << 21
1399 carry[6] = s6 >> 21
1400 s7 += carry[6]
1401 s6 -= carry[6] << 21
1402 carry[7] = s7 >> 21
1403 s8 += carry[7]
1404 s7 -= carry[7] << 21
1405 carry[8] = s8 >> 21
1406 s9 += carry[8]
1407 s8 -= carry[8] << 21
1408 carry[9] = s9 >> 21
1409 s10 += carry[9]
1410 s9 -= carry[9] << 21
1411 carry[10] = s10 >> 21
1412 s11 += carry[10]
1413 s10 -= carry[10] << 21
1414
1415 s[0] = byte(s0 >> 0)
1416 s[1] = byte(s0 >> 8)
1417 s[2] = byte((s0 >> 16) | (s1 << 5))
1418 s[3] = byte(s1 >> 3)
1419 s[4] = byte(s1 >> 11)
1420 s[5] = byte((s1 >> 19) | (s2 << 2))
1421 s[6] = byte(s2 >> 6)
1422 s[7] = byte((s2 >> 14) | (s3 << 7))
1423 s[8] = byte(s3 >> 1)
1424 s[9] = byte(s3 >> 9)
1425 s[10] = byte((s3 >> 17) | (s4 << 4))
1426 s[11] = byte(s4 >> 4)
1427 s[12] = byte(s4 >> 12)
1428 s[13] = byte((s4 >> 20) | (s5 << 1))
1429 s[14] = byte(s5 >> 7)
1430 s[15] = byte((s5 >> 15) | (s6 << 6))
1431 s[16] = byte(s6 >> 2)
1432 s[17] = byte(s6 >> 10)
1433 s[18] = byte((s6 >> 18) | (s7 << 3))
1434 s[19] = byte(s7 >> 5)
1435 s[20] = byte(s7 >> 13)
1436 s[21] = byte(s8 >> 0)
1437 s[22] = byte(s8 >> 8)
1438 s[23] = byte((s8 >> 16) | (s9 << 5))
1439 s[24] = byte(s9 >> 3)
1440 s[25] = byte(s9 >> 11)
1441 s[26] = byte((s9 >> 19) | (s10 << 2))
1442 s[27] = byte(s10 >> 6)
1443 s[28] = byte((s10 >> 14) | (s11 << 7))
1444 s[29] = byte(s11 >> 1)
1445 s[30] = byte(s11 >> 9)
1446 s[31] = byte(s11 >> 17)
1447}
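// A cross-check sketch for ScMulAdd using math/big (hypothetical test code;
// toInt is an assumed helper turning a little-endian [32]byte into a big.Int):
//
//	l := new(big.Int).Lsh(big.NewInt(1), 252)
//	delta, _ := new(big.Int).SetString("27742317777372353535851937790883648493", 10)
//	l.Add(l, delta) // l = 2^252 + 27742317777372353535851937790883648493
//	want := new(big.Int).Mul(toInt(a), toInt(b))
//	want.Add(want, toInt(c))
//	want.Mod(want, l)
//	var s [32]byte
//	ScMulAdd(&s, a, b, c)
//	// toInt(&s) should equal want.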
1448
1449// Input:
1450// s[0]+256*s[1]+...+256^63*s[63] = s
1451//
1452// Output:
1453// s[0]+256*s[1]+...+256^31*s[31] = s mod l
1454// where l = 2^252 + 27742317777372353535851937790883648493.
1455func ScReduce(out *[32]byte, s *[64]byte) {
1456 s0 := 2097151 & load3(s[:])
1457 s1 := 2097151 & (load4(s[2:]) >> 5)
1458 s2 := 2097151 & (load3(s[5:]) >> 2)
1459 s3 := 2097151 & (load4(s[7:]) >> 7)
1460 s4 := 2097151 & (load4(s[10:]) >> 4)
1461 s5 := 2097151 & (load3(s[13:]) >> 1)
1462 s6 := 2097151 & (load4(s[15:]) >> 6)
1463 s7 := 2097151 & (load3(s[18:]) >> 3)
1464 s8 := 2097151 & load3(s[21:])
1465 s9 := 2097151 & (load4(s[23:]) >> 5)
1466 s10 := 2097151 & (load3(s[26:]) >> 2)
1467 s11 := 2097151 & (load4(s[28:]) >> 7)
1468 s12 := 2097151 & (load4(s[31:]) >> 4)
1469 s13 := 2097151 & (load3(s[34:]) >> 1)
1470 s14 := 2097151 & (load4(s[36:]) >> 6)
1471 s15 := 2097151 & (load3(s[39:]) >> 3)
1472 s16 := 2097151 & load3(s[42:])
1473 s17 := 2097151 & (load4(s[44:]) >> 5)
1474 s18 := 2097151 & (load3(s[47:]) >> 2)
1475 s19 := 2097151 & (load4(s[49:]) >> 7)
1476 s20 := 2097151 & (load4(s[52:]) >> 4)
1477 s21 := 2097151 & (load3(s[55:]) >> 1)
1478 s22 := 2097151 & (load4(s[57:]) >> 6)
1479 s23 := (load4(s[60:]) >> 3)
1480
1481 s11 += s23 * 666643
1482 s12 += s23 * 470296
1483 s13 += s23 * 654183
1484 s14 -= s23 * 997805
1485 s15 += s23 * 136657
1486 s16 -= s23 * 683901
1487 s23 = 0
1488
1489 s10 += s22 * 666643
1490 s11 += s22 * 470296
1491 s12 += s22 * 654183
1492 s13 -= s22 * 997805
1493 s14 += s22 * 136657
1494 s15 -= s22 * 683901
1495 s22 = 0
1496
1497 s9 += s21 * 666643
1498 s10 += s21 * 470296
1499 s11 += s21 * 654183
1500 s12 -= s21 * 997805
1501 s13 += s21 * 136657
1502 s14 -= s21 * 683901
1503 s21 = 0
1504
1505 s8 += s20 * 666643
1506 s9 += s20 * 470296
1507 s10 += s20 * 654183
1508 s11 -= s20 * 997805
1509 s12 += s20 * 136657
1510 s13 -= s20 * 683901
1511 s20 = 0
1512
1513 s7 += s19 * 666643
1514 s8 += s19 * 470296
1515 s9 += s19 * 654183
1516 s10 -= s19 * 997805
1517 s11 += s19 * 136657
1518 s12 -= s19 * 683901
1519 s19 = 0
1520
1521 s6 += s18 * 666643
1522 s7 += s18 * 470296
1523 s8 += s18 * 654183
1524 s9 -= s18 * 997805
1525 s10 += s18 * 136657
1526 s11 -= s18 * 683901
1527 s18 = 0
1528
1529 var carry [17]int64
1530
1531 carry[6] = (s6 + (1 << 20)) >> 21
1532 s7 += carry[6]
1533 s6 -= carry[6] << 21
1534 carry[8] = (s8 + (1 << 20)) >> 21
1535 s9 += carry[8]
1536 s8 -= carry[8] << 21
1537 carry[10] = (s10 + (1 << 20)) >> 21
1538 s11 += carry[10]
1539 s10 -= carry[10] << 21
1540 carry[12] = (s12 + (1 << 20)) >> 21
1541 s13 += carry[12]
1542 s12 -= carry[12] << 21
1543 carry[14] = (s14 + (1 << 20)) >> 21
1544 s15 += carry[14]
1545 s14 -= carry[14] << 21
1546 carry[16] = (s16 + (1 << 20)) >> 21
1547 s17 += carry[16]
1548 s16 -= carry[16] << 21
1549
1550 carry[7] = (s7 + (1 << 20)) >> 21
1551 s8 += carry[7]
1552 s7 -= carry[7] << 21
1553 carry[9] = (s9 + (1 << 20)) >> 21
1554 s10 += carry[9]
1555 s9 -= carry[9] << 21
1556 carry[11] = (s11 + (1 << 20)) >> 21
1557 s12 += carry[11]
1558 s11 -= carry[11] << 21
1559 carry[13] = (s13 + (1 << 20)) >> 21
1560 s14 += carry[13]
1561 s13 -= carry[13] << 21
1562 carry[15] = (s15 + (1 << 20)) >> 21
1563 s16 += carry[15]
1564 s15 -= carry[15] << 21
1565
1566 s5 += s17 * 666643
1567 s6 += s17 * 470296
1568 s7 += s17 * 654183
1569 s8 -= s17 * 997805
1570 s9 += s17 * 136657
1571 s10 -= s17 * 683901
1572 s17 = 0
1573
1574 s4 += s16 * 666643
1575 s5 += s16 * 470296
1576 s6 += s16 * 654183
1577 s7 -= s16 * 997805
1578 s8 += s16 * 136657
1579 s9 -= s16 * 683901
1580 s16 = 0
1581
1582 s3 += s15 * 666643
1583 s4 += s15 * 470296
1584 s5 += s15 * 654183
1585 s6 -= s15 * 997805
1586 s7 += s15 * 136657
1587 s8 -= s15 * 683901
1588 s15 = 0
1589
1590 s2 += s14 * 666643
1591 s3 += s14 * 470296
1592 s4 += s14 * 654183
1593 s5 -= s14 * 997805
1594 s6 += s14 * 136657
1595 s7 -= s14 * 683901
1596 s14 = 0
1597
1598 s1 += s13 * 666643
1599 s2 += s13 * 470296
1600 s3 += s13 * 654183
1601 s4 -= s13 * 997805
1602 s5 += s13 * 136657
1603 s6 -= s13 * 683901
1604 s13 = 0
1605
1606 s0 += s12 * 666643
1607 s1 += s12 * 470296
1608 s2 += s12 * 654183
1609 s3 -= s12 * 997805
1610 s4 += s12 * 136657
1611 s5 -= s12 * 683901
1612 s12 = 0
1613
1614 carry[0] = (s0 + (1 << 20)) >> 21
1615 s1 += carry[0]
1616 s0 -= carry[0] << 21
1617 carry[2] = (s2 + (1 << 20)) >> 21
1618 s3 += carry[2]
1619 s2 -= carry[2] << 21
1620 carry[4] = (s4 + (1 << 20)) >> 21
1621 s5 += carry[4]
1622 s4 -= carry[4] << 21
1623 carry[6] = (s6 + (1 << 20)) >> 21
1624 s7 += carry[6]
1625 s6 -= carry[6] << 21
1626 carry[8] = (s8 + (1 << 20)) >> 21
1627 s9 += carry[8]
1628 s8 -= carry[8] << 21
1629 carry[10] = (s10 + (1 << 20)) >> 21
1630 s11 += carry[10]
1631 s10 -= carry[10] << 21
1632
1633 carry[1] = (s1 + (1 << 20)) >> 21
1634 s2 += carry[1]
1635 s1 -= carry[1] << 21
1636 carry[3] = (s3 + (1 << 20)) >> 21
1637 s4 += carry[3]
1638 s3 -= carry[3] << 21
1639 carry[5] = (s5 + (1 << 20)) >> 21
1640 s6 += carry[5]
1641 s5 -= carry[5] << 21
1642 carry[7] = (s7 + (1 << 20)) >> 21
1643 s8 += carry[7]
1644 s7 -= carry[7] << 21
1645 carry[9] = (s9 + (1 << 20)) >> 21
1646 s10 += carry[9]
1647 s9 -= carry[9] << 21
1648 carry[11] = (s11 + (1 << 20)) >> 21
1649 s12 += carry[11]
1650 s11 -= carry[11] << 21
1651
1652 s0 += s12 * 666643
1653 s1 += s12 * 470296
1654 s2 += s12 * 654183
1655 s3 -= s12 * 997805
1656 s4 += s12 * 136657
1657 s5 -= s12 * 683901
1658 s12 = 0
1659
1660 carry[0] = s0 >> 21
1661 s1 += carry[0]
1662 s0 -= carry[0] << 21
1663 carry[1] = s1 >> 21
1664 s2 += carry[1]
1665 s1 -= carry[1] << 21
1666 carry[2] = s2 >> 21
1667 s3 += carry[2]
1668 s2 -= carry[2] << 21
1669 carry[3] = s3 >> 21
1670 s4 += carry[3]
1671 s3 -= carry[3] << 21
1672 carry[4] = s4 >> 21
1673 s5 += carry[4]
1674 s4 -= carry[4] << 21
1675 carry[5] = s5 >> 21
1676 s6 += carry[5]
1677 s5 -= carry[5] << 21
1678 carry[6] = s6 >> 21
1679 s7 += carry[6]
1680 s6 -= carry[6] << 21
1681 carry[7] = s7 >> 21
1682 s8 += carry[7]
1683 s7 -= carry[7] << 21
1684 carry[8] = s8 >> 21
1685 s9 += carry[8]
1686 s8 -= carry[8] << 21
1687 carry[9] = s9 >> 21
1688 s10 += carry[9]
1689 s9 -= carry[9] << 21
1690 carry[10] = s10 >> 21
1691 s11 += carry[10]
1692 s10 -= carry[10] << 21
1693 carry[11] = s11 >> 21
1694 s12 += carry[11]
1695 s11 -= carry[11] << 21
1696
1697 s0 += s12 * 666643
1698 s1 += s12 * 470296
1699 s2 += s12 * 654183
1700 s3 -= s12 * 997805
1701 s4 += s12 * 136657
1702 s5 -= s12 * 683901
1703 s12 = 0
1704
1705 carry[0] = s0 >> 21
1706 s1 += carry[0]
1707 s0 -= carry[0] << 21
1708 carry[1] = s1 >> 21
1709 s2 += carry[1]
1710 s1 -= carry[1] << 21
1711 carry[2] = s2 >> 21
1712 s3 += carry[2]
1713 s2 -= carry[2] << 21
1714 carry[3] = s3 >> 21
1715 s4 += carry[3]
1716 s3 -= carry[3] << 21
1717 carry[4] = s4 >> 21
1718 s5 += carry[4]
1719 s4 -= carry[4] << 21
1720 carry[5] = s5 >> 21
1721 s6 += carry[5]
1722 s5 -= carry[5] << 21
1723 carry[6] = s6 >> 21
1724 s7 += carry[6]
1725 s6 -= carry[6] << 21
1726 carry[7] = s7 >> 21
1727 s8 += carry[7]
1728 s7 -= carry[7] << 21
1729 carry[8] = s8 >> 21
1730 s9 += carry[8]
1731 s8 -= carry[8] << 21
1732 carry[9] = s9 >> 21
1733 s10 += carry[9]
1734 s9 -= carry[9] << 21
1735 carry[10] = s10 >> 21
1736 s11 += carry[10]
1737 s10 -= carry[10] << 21
1738
1739 out[0] = byte(s0 >> 0)
1740 out[1] = byte(s0 >> 8)
1741 out[2] = byte((s0 >> 16) | (s1 << 5))
1742 out[3] = byte(s1 >> 3)
1743 out[4] = byte(s1 >> 11)
1744 out[5] = byte((s1 >> 19) | (s2 << 2))
1745 out[6] = byte(s2 >> 6)
1746 out[7] = byte((s2 >> 14) | (s3 << 7))
1747 out[8] = byte(s3 >> 1)
1748 out[9] = byte(s3 >> 9)
1749 out[10] = byte((s3 >> 17) | (s4 << 4))
1750 out[11] = byte(s4 >> 4)
1751 out[12] = byte(s4 >> 12)
1752 out[13] = byte((s4 >> 20) | (s5 << 1))
1753 out[14] = byte(s5 >> 7)
1754 out[15] = byte((s5 >> 15) | (s6 << 6))
1755 out[16] = byte(s6 >> 2)
1756 out[17] = byte(s6 >> 10)
1757 out[18] = byte((s6 >> 18) | (s7 << 3))
1758 out[19] = byte(s7 >> 5)
1759 out[20] = byte(s7 >> 13)
1760 out[21] = byte(s8 >> 0)
1761 out[22] = byte(s8 >> 8)
1762 out[23] = byte((s8 >> 16) | (s9 << 5))
1763 out[24] = byte(s9 >> 3)
1764 out[25] = byte(s9 >> 11)
1765 out[26] = byte((s9 >> 19) | (s10 << 2))
1766 out[27] = byte(s10 >> 6)
1767 out[28] = byte((s10 >> 14) | (s11 << 7))
1768 out[29] = byte(s11 >> 1)
1769 out[30] = byte(s11 >> 9)
1770 out[31] = byte(s11 >> 17)
1771}
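// In Ed25519 this is how a 64-byte SHA-512 digest becomes a scalar, e.g.
// (sketch):
//
//	digest := sha512.Sum512(message)
//	var k [32]byte
//	ScReduce(&k, &digest)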
diff --git a/vendor/golang.org/x/crypto/ssh/buffer.go b/vendor/golang.org/x/crypto/ssh/buffer.go
new file mode 100644
index 0000000..6931b51
--- /dev/null
+++ b/vendor/golang.org/x/crypto/ssh/buffer.go
@@ -0,0 +1,98 @@
1// Copyright 2012 The Go Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package ssh
6
7import (
8 "io"
9 "sync"
10)
11
12// buffer provides a linked list buffer for data exchange
13// between producer and consumer. Theoretically the buffer is
14// of unlimited capacity as it does no allocation of its own.
15type buffer struct {
16 // protects concurrent access to head, tail and closed
17 *sync.Cond
18
19 head *element // the buffer that will be read first
20 tail *element // the buffer that will be read last
21
22 closed bool
23}
24
25// An element represents a single link in a linked list.
26type element struct {
27 buf []byte
28 next *element
29}
30
31// newBuffer returns an empty buffer that is not closed.
32func newBuffer() *buffer {
33 e := new(element)
34 b := &buffer{
35 Cond: newCond(),
36 head: e,
37 tail: e,
38 }
39 return b
40}
41
42// write makes buf available for Read to receive.
43// buf must not be modified after the call to write.
44func (b *buffer) write(buf []byte) {
45 b.Cond.L.Lock()
46 e := &element{buf: buf}
47 b.tail.next = e
48 b.tail = e
49 b.Cond.Signal()
50 b.Cond.L.Unlock()
51}
52
53// eof closes the buffer. Reads from the buffer once all
54// the data has been consumed will receive io.EOF.
55func (b *buffer) eof() error {
56 b.Cond.L.Lock()
57 b.closed = true
58 b.Cond.Signal()
59 b.Cond.L.Unlock()
60 return nil
61}
62
63// Read reads data from the internal buffer into buf. Reads will block
64// if no data is available, or until the buffer is closed.
65func (b *buffer) Read(buf []byte) (n int, err error) {
66 b.Cond.L.Lock()
67 defer b.Cond.L.Unlock()
68
69 for len(buf) > 0 {
70 // if there is data in b.head, copy it
71 if len(b.head.buf) > 0 {
72 r := copy(buf, b.head.buf)
73 buf, b.head.buf = buf[r:], b.head.buf[r:]
74 n += r
75 continue
76 }
77 // if there is a next buffer, make it the head
78 if len(b.head.buf) == 0 && b.head != b.tail {
79 b.head = b.head.next
80 continue
81 }
82
83 // if at least one byte has been copied, return
84 if n > 0 {
85 break
86 }
87
88		// if nothing was read and there is nothing outstanding,
89 // check to see if the buffer is closed.
90 if b.closed {
91 err = io.EOF
92 break
93 }
94 // out of buffers, wait for producer
95 b.Cond.Wait()
96 }
97 return
98}
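// A usage sketch (hypothetical): one goroutine produces, another consumes,
// and eof lets the final Read report io.EOF once the data is drained:
//
//	b := newBuffer()
//	go func() {
//		b.write([]byte("hello"))
//		b.eof()
//	}()
//	p := make([]byte, 16)
//	n, _ := b.Read(p)   // n == 5, p[:5] == "hello"
//	_, err := b.Read(p) // err == io.EOF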
diff --git a/vendor/golang.org/x/crypto/ssh/certs.go b/vendor/golang.org/x/crypto/ssh/certs.go
new file mode 100644
index 0000000..6331c94
--- /dev/null
+++ b/vendor/golang.org/x/crypto/ssh/certs.go
@@ -0,0 +1,503 @@
1// Copyright 2012 The Go Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package ssh
6
7import (
8 "bytes"
9 "errors"
10 "fmt"
11 "io"
12 "net"
13 "sort"
14 "time"
15)
16
17// These constants from [PROTOCOL.certkeys] represent the algorithm names
18// for certificate types supported by this package.
19const (
20 CertAlgoRSAv01 = "ssh-rsa-cert-v01@openssh.com"
21 CertAlgoDSAv01 = "ssh-dss-cert-v01@openssh.com"
22 CertAlgoECDSA256v01 = "ecdsa-sha2-nistp256-cert-v01@openssh.com"
23 CertAlgoECDSA384v01 = "ecdsa-sha2-nistp384-cert-v01@openssh.com"
24 CertAlgoECDSA521v01 = "ecdsa-sha2-nistp521-cert-v01@openssh.com"
25 CertAlgoED25519v01 = "ssh-ed25519-cert-v01@openssh.com"
26)
27
28// Certificate types distinguish between host and user
29// certificates. The values can be set in the CertType field of
30// Certificate.
31const (
32 UserCert = 1
33 HostCert = 2
34)
35
36// Signature represents a cryptographic signature.
37type Signature struct {
38 Format string
39 Blob []byte
40}
41
42// CertTimeInfinity can be used for Certificate.ValidBefore to indicate that
43// a certificate does not expire.
44const CertTimeInfinity = 1<<64 - 1
45
46// A Certificate represents an OpenSSH certificate as defined in
47// [PROTOCOL.certkeys]?rev=1.8.
48type Certificate struct {
49 Nonce []byte
50 Key PublicKey
51 Serial uint64
52 CertType uint32
53 KeyId string
54 ValidPrincipals []string
55 ValidAfter uint64
56 ValidBefore uint64
57 Permissions
58 Reserved []byte
59 SignatureKey PublicKey
60 Signature *Signature
61}
62
63// genericCertData holds the key-independent part of the certificate data.
64// Overall, certificates contain a nonce, public key fields and
65// key-independent fields.
66type genericCertData struct {
67 Serial uint64
68 CertType uint32
69 KeyId string
70 ValidPrincipals []byte
71 ValidAfter uint64
72 ValidBefore uint64
73 CriticalOptions []byte
74 Extensions []byte
75 Reserved []byte
76 SignatureKey []byte
77 Signature []byte
78}
79
80func marshalStringList(namelist []string) []byte {
81 var to []byte
82 for _, name := range namelist {
83 s := struct{ N string }{name}
84 to = append(to, Marshal(&s)...)
85 }
86 return to
87}
88
89type optionsTuple struct {
90 Key string
91 Value []byte
92}
93
94type optionsTupleValue struct {
95 Value string
96}
97
98// serialize a map of critical options or extensions
99// issue #10569 - per [PROTOCOL.certkeys] and SSH implementation,
100// we need two length prefixes for a non-empty string value
101func marshalTuples(tups map[string]string) []byte {
102 keys := make([]string, 0, len(tups))
103 for key := range tups {
104 keys = append(keys, key)
105 }
106 sort.Strings(keys)
107
108 var ret []byte
109 for _, key := range keys {
110 s := optionsTuple{Key: key}
111 if value := tups[key]; len(value) > 0 {
112 s.Value = Marshal(&optionsTupleValue{value})
113 }
114 ret = append(ret, Marshal(&s)...)
115 }
116 return ret
117}
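// For example, marshalTuples(map[string]string{"force-command": "ls"}) emits
// the option name as an SSH string followed by the doubly length-prefixed
// value:
//
//	00 00 00 0d "force-command"   name, 13 bytes
//	00 00 00 06                   outer data field, 6 bytes
//	00 00 00 02 "ls"              inner string, 2 bytes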
118
119// issue #10569 - per [PROTOCOL.certkeys] and SSH implementation,
120// we need two length prefixes for a non-empty option value
121func parseTuples(in []byte) (map[string]string, error) {
122 tups := map[string]string{}
123 var lastKey string
124 var haveLastKey bool
125
126 for len(in) > 0 {
127 var key, val, extra []byte
128 var ok bool
129
130 if key, in, ok = parseString(in); !ok {
131 return nil, errShortRead
132 }
133 keyStr := string(key)
134 // according to [PROTOCOL.certkeys], the names must be in
135 // lexical order.
136 if haveLastKey && keyStr <= lastKey {
137 return nil, fmt.Errorf("ssh: certificate options are not in lexical order")
138 }
139 lastKey, haveLastKey = keyStr, true
140 // the next field is a data field, which if non-empty has a string embedded
141 if val, in, ok = parseString(in); !ok {
142 return nil, errShortRead
143 }
144 if len(val) > 0 {
145 val, extra, ok = parseString(val)
146 if !ok {
147 return nil, errShortRead
148 }
149 if len(extra) > 0 {
150 return nil, fmt.Errorf("ssh: unexpected trailing data after certificate option value")
151 }
152 tups[keyStr] = string(val)
153 } else {
154 tups[keyStr] = ""
155 }
156 }
157 return tups, nil
158}
159
160func parseCert(in []byte, privAlgo string) (*Certificate, error) {
161 nonce, rest, ok := parseString(in)
162 if !ok {
163 return nil, errShortRead
164 }
165
166 key, rest, err := parsePubKey(rest, privAlgo)
167 if err != nil {
168 return nil, err
169 }
170
171 var g genericCertData
172 if err := Unmarshal(rest, &g); err != nil {
173 return nil, err
174 }
175
176 c := &Certificate{
177 Nonce: nonce,
178 Key: key,
179 Serial: g.Serial,
180 CertType: g.CertType,
181 KeyId: g.KeyId,
182 ValidAfter: g.ValidAfter,
183 ValidBefore: g.ValidBefore,
184 }
185
186 for principals := g.ValidPrincipals; len(principals) > 0; {
187 principal, rest, ok := parseString(principals)
188 if !ok {
189 return nil, errShortRead
190 }
191 c.ValidPrincipals = append(c.ValidPrincipals, string(principal))
192 principals = rest
193 }
194
195 c.CriticalOptions, err = parseTuples(g.CriticalOptions)
196 if err != nil {
197 return nil, err
198 }
199 c.Extensions, err = parseTuples(g.Extensions)
200 if err != nil {
201 return nil, err
202 }
203 c.Reserved = g.Reserved
204 k, err := ParsePublicKey(g.SignatureKey)
205 if err != nil {
206 return nil, err
207 }
208
209 c.SignatureKey = k
210 c.Signature, rest, ok = parseSignatureBody(g.Signature)
211 if !ok || len(rest) > 0 {
212 return nil, errors.New("ssh: signature parse error")
213 }
214
215 return c, nil
216}
217
218type openSSHCertSigner struct {
219 pub *Certificate
220 signer Signer
221}
222
223// NewCertSigner returns a Signer that signs with the given Certificate, whose
224// private key is held by signer. It returns an error if the public key in cert
225// doesn't match the key used by signer.
226func NewCertSigner(cert *Certificate, signer Signer) (Signer, error) {
227	if !bytes.Equal(cert.Key.Marshal(), signer.PublicKey().Marshal()) {
228 return nil, errors.New("ssh: signer and cert have different public key")
229 }
230
231 return &openSSHCertSigner{cert, signer}, nil
232}
233
234func (s *openSSHCertSigner) Sign(rand io.Reader, data []byte) (*Signature, error) {
235 return s.signer.Sign(rand, data)
236}
237
238func (s *openSSHCertSigner) PublicKey() PublicKey {
239 return s.pub
240}
241
242const sourceAddressCriticalOption = "source-address"
243
244// CertChecker does the work of verifying a certificate. Its methods
245// can be plugged into ClientConfig.HostKeyCallback and
246// ServerConfig.PublicKeyCallback. For the CertChecker to work,
247// minimally, the IsAuthority callback should be set.
248type CertChecker struct {
249 // SupportedCriticalOptions lists the CriticalOptions that the
250 // server application layer understands. These are only used
251 // for user certificates.
252 SupportedCriticalOptions []string
253
254 // IsAuthority should return true if the key is recognized as
255 // an authority. This allows for certificates to be signed by other
256 // certificates.
257 IsAuthority func(auth PublicKey) bool
258
259 // Clock is used for verifying time stamps. If nil, time.Now
260 // is used.
261 Clock func() time.Time
262
263 // UserKeyFallback is called when CertChecker.Authenticate encounters a
264 // public key that is not a certificate. It must implement validation
265	// of user keys. If nil, all such keys are rejected.
266 UserKeyFallback func(conn ConnMetadata, key PublicKey) (*Permissions, error)
267
268 // HostKeyFallback is called when CertChecker.CheckHostKey encounters a
269 // public key that is not a certificate. It must implement host key
270	// validation. If nil, all such keys are rejected.
271 HostKeyFallback func(addr string, remote net.Addr, key PublicKey) error
272
273 // IsRevoked is called for each certificate so that revocation checking
274 // can be implemented. It should return true if the given certificate
275 // is revoked and false otherwise. If nil, no certificates are
276 // considered to have been revoked.
277 IsRevoked func(cert *Certificate) bool
278}
279
280// CheckHostKey checks a host key certificate. This method can be
281// plugged into ClientConfig.HostKeyCallback.
282func (c *CertChecker) CheckHostKey(addr string, remote net.Addr, key PublicKey) error {
283 cert, ok := key.(*Certificate)
284 if !ok {
285 if c.HostKeyFallback != nil {
286 return c.HostKeyFallback(addr, remote, key)
287 }
288 return errors.New("ssh: non-certificate host key")
289 }
290 if cert.CertType != HostCert {
291 return fmt.Errorf("ssh: certificate presented as a host key has type %d", cert.CertType)
292 }
293
294 return c.CheckCert(addr, cert)
295}
296
297// Authenticate checks a user certificate. Authenticate can be used as
298// a value for ServerConfig.PublicKeyCallback.
299func (c *CertChecker) Authenticate(conn ConnMetadata, pubKey PublicKey) (*Permissions, error) {
300 cert, ok := pubKey.(*Certificate)
301 if !ok {
302 if c.UserKeyFallback != nil {
303 return c.UserKeyFallback(conn, pubKey)
304 }
305 return nil, errors.New("ssh: normal key pairs not accepted")
306 }
307
308 if cert.CertType != UserCert {
309 return nil, fmt.Errorf("ssh: cert has type %d", cert.CertType)
310 }
311
312 if err := c.CheckCert(conn.User(), cert); err != nil {
313 return nil, err
314 }
315
316 return &cert.Permissions, nil
317}
318
319// CheckCert checks CriticalOptions, ValidPrincipals, revocation, timestamp and
320// the signature of the certificate.
321func (c *CertChecker) CheckCert(principal string, cert *Certificate) error {
322 if c.IsRevoked != nil && c.IsRevoked(cert) {
323		return fmt.Errorf("ssh: certificate serial %d revoked", cert.Serial)
324 }
325
326	for opt := range cert.CriticalOptions {
327 // sourceAddressCriticalOption will be enforced by
328 // serverAuthenticate
329 if opt == sourceAddressCriticalOption {
330 continue
331 }
332
333 found := false
334 for _, supp := range c.SupportedCriticalOptions {
335 if supp == opt {
336 found = true
337 break
338 }
339 }
340 if !found {
341 return fmt.Errorf("ssh: unsupported critical option %q in certificate", opt)
342 }
343 }
344
345 if len(cert.ValidPrincipals) > 0 {
346 // By default, certs are valid for all users/hosts.
347 found := false
348 for _, p := range cert.ValidPrincipals {
349 if p == principal {
350 found = true
351 break
352 }
353 }
354 if !found {
355 return fmt.Errorf("ssh: principal %q not in the set of valid principals for given certificate: %q", principal, cert.ValidPrincipals)
356 }
357 }
358
359 if !c.IsAuthority(cert.SignatureKey) {
360 return fmt.Errorf("ssh: certificate signed by unrecognized authority")
361 }
362
363 clock := c.Clock
364 if clock == nil {
365 clock = time.Now
366 }
367
368 unixNow := clock().Unix()
369	if after := int64(cert.ValidAfter); after < 0 || unixNow < after {
370 return fmt.Errorf("ssh: cert is not yet valid")
371 }
372 if before := int64(cert.ValidBefore); cert.ValidBefore != uint64(CertTimeInfinity) && (unixNow >= before || before < 0) {
373 return fmt.Errorf("ssh: cert has expired")
374 }
375 if err := cert.SignatureKey.Verify(cert.bytesForSigning(), cert.Signature); err != nil {
376 return fmt.Errorf("ssh: certificate signature does not verify")
377 }
378
379 return nil
380}
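// A server-side sketch (hypothetical; caPub is the public key of the trusted
// certificate authority):
//
//	checker := &CertChecker{
//		IsAuthority: func(auth PublicKey) bool {
//			return bytes.Equal(auth.Marshal(), caPub.Marshal())
//		},
//	}
//	config := &ServerConfig{PublicKeyCallback: checker.Authenticate}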
381
382// SignCert sets c.SignatureKey to the authority's public key and stores a
383// Signature, by authority, in the certificate.
384func (c *Certificate) SignCert(rand io.Reader, authority Signer) error {
385 c.Nonce = make([]byte, 32)
386 if _, err := io.ReadFull(rand, c.Nonce); err != nil {
387 return err
388 }
389 c.SignatureKey = authority.PublicKey()
390
391 sig, err := authority.Sign(rand, c.bytesForSigning())
392 if err != nil {
393 return err
394 }
395 c.Signature = sig
396 return nil
397}
398
399var certAlgoNames = map[string]string{
400 KeyAlgoRSA: CertAlgoRSAv01,
401 KeyAlgoDSA: CertAlgoDSAv01,
402 KeyAlgoECDSA256: CertAlgoECDSA256v01,
403 KeyAlgoECDSA384: CertAlgoECDSA384v01,
404 KeyAlgoECDSA521: CertAlgoECDSA521v01,
405 KeyAlgoED25519: CertAlgoED25519v01,
406}
407
408// certToPrivAlgo returns the underlying algorithm for a certificate algorithm.
409// Panics if a non-certificate algorithm is passed.
410func certToPrivAlgo(algo string) string {
411 for privAlgo, pubAlgo := range certAlgoNames {
412 if pubAlgo == algo {
413 return privAlgo
414 }
415 }
416 panic("unknown cert algorithm")
417}
418
419func (cert *Certificate) bytesForSigning() []byte {
420 c2 := *cert
421 c2.Signature = nil
422 out := c2.Marshal()
423 // Drop trailing signature length.
424 return out[:len(out)-4]
425}
426
427// Marshal serializes c into OpenSSH's wire format. It is part of the
428// PublicKey interface.
429func (c *Certificate) Marshal() []byte {
430 generic := genericCertData{
431 Serial: c.Serial,
432 CertType: c.CertType,
433 KeyId: c.KeyId,
434 ValidPrincipals: marshalStringList(c.ValidPrincipals),
435 ValidAfter: uint64(c.ValidAfter),
436 ValidBefore: uint64(c.ValidBefore),
437 CriticalOptions: marshalTuples(c.CriticalOptions),
438 Extensions: marshalTuples(c.Extensions),
439 Reserved: c.Reserved,
440 SignatureKey: c.SignatureKey.Marshal(),
441 }
442 if c.Signature != nil {
443 generic.Signature = Marshal(c.Signature)
444 }
445 genericBytes := Marshal(&generic)
446 keyBytes := c.Key.Marshal()
447 _, keyBytes, _ = parseString(keyBytes)
448 prefix := Marshal(&struct {
449 Name string
450 Nonce []byte
451 Key []byte `ssh:"rest"`
452 }{c.Type(), c.Nonce, keyBytes})
453
454 result := make([]byte, 0, len(prefix)+len(genericBytes))
455 result = append(result, prefix...)
456 result = append(result, genericBytes...)
457 return result
458}
459
460// Type returns the key name. It is part of the PublicKey interface.
461func (c *Certificate) Type() string {
462 algo, ok := certAlgoNames[c.Key.Type()]
463 if !ok {
464 panic("unknown cert key type " + c.Key.Type())
465 }
466 return algo
467}
468
469// Verify verifies a signature against the certificate's public
470// key. It is part of the PublicKey interface.
471func (c *Certificate) Verify(data []byte, sig *Signature) error {
472 return c.Key.Verify(data, sig)
473}
474
475func parseSignatureBody(in []byte) (out *Signature, rest []byte, ok bool) {
476 format, in, ok := parseString(in)
477 if !ok {
478 return
479 }
480
481 out = &Signature{
482 Format: string(format),
483 }
484
485 if out.Blob, in, ok = parseString(in); !ok {
486 return
487 }
488
489 return out, in, ok
490}
491
492func parseSignature(in []byte) (out *Signature, rest []byte, ok bool) {
493 sigBytes, rest, ok := parseString(in)
494 if !ok {
495 return
496 }
497
498 out, trailing, ok := parseSignatureBody(sigBytes)
499 if !ok || len(trailing) > 0 {
500 return nil, nil, false
501 }
502 return
503}
diff --git a/vendor/golang.org/x/crypto/ssh/channel.go b/vendor/golang.org/x/crypto/ssh/channel.go
new file mode 100644
index 0000000..195530e
--- /dev/null
+++ b/vendor/golang.org/x/crypto/ssh/channel.go
@@ -0,0 +1,633 @@
1// Copyright 2011 The Go Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package ssh
6
7import (
8 "encoding/binary"
9 "errors"
10 "fmt"
11 "io"
12 "log"
13 "sync"
14)
15
16const (
17 minPacketLength = 9
18 // channelMaxPacket contains the maximum number of bytes that will be
19 // sent in a single packet. As per RFC 4253, section 6.1, 32k is also
20 // the minimum.
21 channelMaxPacket = 1 << 15
22 // We follow OpenSSH here.
23 channelWindowSize = 64 * channelMaxPacket
24)
25
26// NewChannel represents an incoming request to a channel. It must either be
27// accepted for use by calling Accept, or rejected by calling Reject.
28type NewChannel interface {
29 // Accept accepts the channel creation request. It returns the Channel
30 // and a Go channel containing SSH requests. The Go channel must be
31	// serviced, otherwise the Channel will hang.
32 Accept() (Channel, <-chan *Request, error)
33
34 // Reject rejects the channel creation request. After calling
35 // this, no other methods on the Channel may be called.
36 Reject(reason RejectionReason, message string) error
37
38 // ChannelType returns the type of the channel, as supplied by the
39 // client.
40 ChannelType() string
41
42 // ExtraData returns the arbitrary payload for this channel, as supplied
43 // by the client. This data is specific to the channel type.
44 ExtraData() []byte
45}
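// A typical accept loop (sketch; chans is the <-chan NewChannel obtained from
// e.g. NewServerConn):
//
//	for newCh := range chans {
//		if newCh.ChannelType() != "session" {
//			newCh.Reject(UnknownChannelType, "unsupported channel type")
//			continue
//		}
//		ch, reqs, err := newCh.Accept()
//		if err != nil {
//			continue
//		}
//		go DiscardRequests(reqs)
//		go serveSession(ch) // serveSession is an assumed application handler
//	}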
46
47// A Channel is an ordered, reliable, flow-controlled, duplex stream
48// that is multiplexed over an SSH connection.
49type Channel interface {
50 // Read reads up to len(data) bytes from the channel.
51 Read(data []byte) (int, error)
52
53 // Write writes len(data) bytes to the channel.
54 Write(data []byte) (int, error)
55
56 // Close signals end of channel use. No data may be sent after this
57 // call.
58 Close() error
59
60 // CloseWrite signals the end of sending in-band
61 // data. Requests may still be sent, and the other side may
62	// still send data.
63 CloseWrite() error
64
65 // SendRequest sends a channel request. If wantReply is true,
66 // it will wait for a reply and return the result as a
67 // boolean, otherwise the return value will be false. Channel
68 // requests are out-of-band messages so they may be sent even
69 // if the data stream is closed or blocked by flow control.
70 // If the channel is closed before a reply is returned, io.EOF
71 // is returned.
72 SendRequest(name string, wantReply bool, payload []byte) (bool, error)
73
74 // Stderr returns an io.ReadWriter that writes to this channel
75 // with the extended data type set to stderr. Stderr may
76 // safely be read and written from a different goroutine than
77 // Read and Write respectively.
78 Stderr() io.ReadWriter
79}
80
81// Request is a request sent outside of the normal stream of
82// data. Requests can either be specific to an SSH channel, or they
83// can be global.
84type Request struct {
85 Type string
86 WantReply bool
87 Payload []byte
88
89 ch *channel
90 mux *mux
91}
92
93// Reply sends a response to a request. It must be called for all requests
94// where WantReply is true and is a no-op otherwise. The payload argument is
95// ignored for replies to channel-specific requests.
96func (r *Request) Reply(ok bool, payload []byte) error {
97 if !r.WantReply {
98 return nil
99 }
100
101 if r.ch == nil {
102 return r.mux.ackRequest(ok, payload)
103 }
104
105 return r.ch.ackRequest(ok)
106}
107
108// RejectionReason is an enumeration used when rejecting channel creation
109// requests. See RFC 4254, section 5.1.
110type RejectionReason uint32
111
112const (
113 Prohibited RejectionReason = iota + 1
114 ConnectionFailed
115 UnknownChannelType
116 ResourceShortage
117)
118
119// String converts the rejection reason to human readable form.
120func (r RejectionReason) String() string {
121 switch r {
122 case Prohibited:
123 return "administratively prohibited"
124 case ConnectionFailed:
125 return "connect failed"
126 case UnknownChannelType:
127 return "unknown channel type"
128 case ResourceShortage:
129 return "resource shortage"
130 }
131 return fmt.Sprintf("unknown reason %d", int(r))
132}
133
134func min(a uint32, b int) uint32 {
135 if a < uint32(b) {
136 return a
137 }
138 return uint32(b)
139}
140
141type channelDirection uint8
142
143const (
144 channelInbound channelDirection = iota
145 channelOutbound
146)
147
148// channel is an implementation of the Channel interface that works
149// with the mux class.
150type channel struct {
151 // R/O after creation
152 chanType string
153 extraData []byte
154 localId, remoteId uint32
155
156 // maxIncomingPayload and maxRemotePayload are the maximum
157 // payload sizes of normal and extended data packets for
158 // receiving and sending, respectively. The wire packet will
159 // be 9 or 13 bytes larger (excluding encryption overhead).
160 maxIncomingPayload uint32
161 maxRemotePayload uint32
162
163 mux *mux
164
165 // decided is set to true if an accept or reject message has been sent
166 // (for outbound channels) or received (for inbound channels).
167 decided bool
168
169 // direction contains either channelOutbound, for channels created
170 // locally, or channelInbound, for channels created by the peer.
171 direction channelDirection
172
173 // Pending internal channel messages.
174 msg chan interface{}
175
176 // Since requests have no ID, there can be only one request
177 // with WantReply=true outstanding. This lock is held by a
178 // goroutine that has such an outgoing request pending.
179 sentRequestMu sync.Mutex
180
181 incomingRequests chan *Request
182
183 sentEOF bool
184
185 // thread-safe data
186 remoteWin window
187 pending *buffer
188 extPending *buffer
189
190 // windowMu protects myWindow, the flow-control window.
191 windowMu sync.Mutex
192 myWindow uint32
193
194 // writeMu serializes calls to mux.conn.writePacket() and
195 // protects sentClose and packetPool. This mutex must be
196 // different from windowMu, as writePacket can block if there
197 // is a key exchange pending.
198 writeMu sync.Mutex
199 sentClose bool
200
201 // packetPool has a buffer for each extended channel ID to
202 // save allocations during writes.
203 packetPool map[uint32][]byte
204}
205
206// writePacket sends a packet. If the packet is a channel close, it updates
207// sentClose. This method takes the lock c.writeMu.
208func (c *channel) writePacket(packet []byte) error {
209 c.writeMu.Lock()
210 if c.sentClose {
211 c.writeMu.Unlock()
212 return io.EOF
213 }
214 c.sentClose = (packet[0] == msgChannelClose)
215 err := c.mux.conn.writePacket(packet)
216 c.writeMu.Unlock()
217 return err
218}
219
220func (c *channel) sendMessage(msg interface{}) error {
221 if debugMux {
222 log.Printf("send(%d): %#v", c.mux.chanList.offset, msg)
223 }
224
225 p := Marshal(msg)
226 binary.BigEndian.PutUint32(p[1:], c.remoteId)
227 return c.writePacket(p)
228}
229
230// WriteExtended writes data to a specific extended stream. These streams are
231// used, for example, for stderr.
232func (c *channel) WriteExtended(data []byte, extendedCode uint32) (n int, err error) {
233 if c.sentEOF {
234 return 0, io.EOF
235 }
236 // 1 byte message type, 4 bytes remoteId, 4 bytes data length
237 opCode := byte(msgChannelData)
238 headerLength := uint32(9)
239 if extendedCode > 0 {
240 headerLength += 4
241 opCode = msgChannelExtendedData
242 }
243
244 c.writeMu.Lock()
245 packet := c.packetPool[extendedCode]
246 // We don't remove the buffer from packetPool, so
247 // WriteExtended calls from different goroutines will be
248 // flagged as errors by the race detector.
249 c.writeMu.Unlock()
250
251 for len(data) > 0 {
252 space := min(c.maxRemotePayload, len(data))
253 if space, err = c.remoteWin.reserve(space); err != nil {
254 return n, err
255 }
256 if want := headerLength + space; uint32(cap(packet)) < want {
257 packet = make([]byte, want)
258 } else {
259 packet = packet[:want]
260 }
261
262 todo := data[:space]
263
264 packet[0] = opCode
265 binary.BigEndian.PutUint32(packet[1:], c.remoteId)
266 if extendedCode > 0 {
267 binary.BigEndian.PutUint32(packet[5:], uint32(extendedCode))
268 }
269 binary.BigEndian.PutUint32(packet[headerLength-4:], uint32(len(todo)))
270 copy(packet[headerLength:], todo)
271 if err = c.writePacket(packet); err != nil {
272 return n, err
273 }
274
275 n += len(todo)
276 data = data[len(todo):]
277 }
278
279 c.writeMu.Lock()
280 c.packetPool[extendedCode] = packet
281 c.writeMu.Unlock()
282
283 return n, err
284}
285
286func (c *channel) handleData(packet []byte) error {
287 headerLen := 9
288 isExtendedData := packet[0] == msgChannelExtendedData
289 if isExtendedData {
290 headerLen = 13
291 }
292 if len(packet) < headerLen {
293 // malformed data packet
294 return parseError(packet[0])
295 }
296
297 var extended uint32
298 if isExtendedData {
299 extended = binary.BigEndian.Uint32(packet[5:])
300 }
301
302 length := binary.BigEndian.Uint32(packet[headerLen-4 : headerLen])
303 if length == 0 {
304 return nil
305 }
306 if length > c.maxIncomingPayload {
307 // TODO(hanwen): should send Disconnect?
308 return errors.New("ssh: incoming packet exceeds maximum payload size")
309 }
310
311 data := packet[headerLen:]
312 if length != uint32(len(data)) {
313 return errors.New("ssh: wrong packet length")
314 }
315
316 c.windowMu.Lock()
317 if c.myWindow < length {
318 c.windowMu.Unlock()
319 // TODO(hanwen): should send Disconnect with reason?
320 return errors.New("ssh: remote side wrote too much")
321 }
322 c.myWindow -= length
323 c.windowMu.Unlock()
324
325 if extended == 1 {
326 c.extPending.write(data)
327 } else if extended > 0 {
328 // discard other extended data.
329 } else {
330 c.pending.write(data)
331 }
332 return nil
333}
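// For reference, the two packet layouts handled above (RFC 4254, section 5.2):
//
//	SSH_MSG_CHANNEL_DATA:          byte | uint32 channel | uint32 length | data
//	SSH_MSG_CHANNEL_EXTENDED_DATA: byte | uint32 channel | uint32 code | uint32 length | data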
334
335func (c *channel) adjustWindow(n uint32) error {
336 c.windowMu.Lock()
337 // Since myWindow is managed on our side, and can never exceed
338 // the initial window setting, we don't worry about overflow.
339 c.myWindow += uint32(n)
340 c.windowMu.Unlock()
341 return c.sendMessage(windowAdjustMsg{
342 AdditionalBytes: uint32(n),
343 })
344}
345
346func (c *channel) ReadExtended(data []byte, extended uint32) (n int, err error) {
347 switch extended {
348 case 1:
349 n, err = c.extPending.Read(data)
350 case 0:
351 n, err = c.pending.Read(data)
352 default:
353 return 0, fmt.Errorf("ssh: extended code %d unimplemented", extended)
354 }
355
356 if n > 0 {
357 err = c.adjustWindow(uint32(n))
358		// adjustWindow can return io.EOF if the remote
359 // peer has closed the connection, however we want to
360 // defer forwarding io.EOF to the caller of Read until
361 // the buffer has been drained.
362 if n > 0 && err == io.EOF {
363 err = nil
364 }
365 }
366
367 return n, err
368}
369
370func (c *channel) close() {
371 c.pending.eof()
372 c.extPending.eof()
373 close(c.msg)
374 close(c.incomingRequests)
375 c.writeMu.Lock()
376 // This is not necessary for a normal channel teardown, but if
377 // there was another error, it is.
378 c.sentClose = true
379 c.writeMu.Unlock()
380 // Unblock writers.
381 c.remoteWin.close()
382}
383
384// responseMessageReceived is called when a success or failure message is
385// received on a channel to check that such a message is reasonable for the
386// given channel.
387func (c *channel) responseMessageReceived() error {
388 if c.direction == channelInbound {
389 return errors.New("ssh: channel response message received on inbound channel")
390 }
391 if c.decided {
392 return errors.New("ssh: duplicate response received for channel")
393 }
394 c.decided = true
395 return nil
396}
397
398func (c *channel) handlePacket(packet []byte) error {
399 switch packet[0] {
400 case msgChannelData, msgChannelExtendedData:
401 return c.handleData(packet)
402 case msgChannelClose:
403 c.sendMessage(channelCloseMsg{PeersId: c.remoteId})
404 c.mux.chanList.remove(c.localId)
405 c.close()
406 return nil
407 case msgChannelEOF:
408		// RFC 4254 is silent on how EOF affects dataExt messages, but
409 // it is logical to signal EOF at the same time.
410 c.extPending.eof()
411 c.pending.eof()
412 return nil
413 }
414
415 decoded, err := decode(packet)
416 if err != nil {
417 return err
418 }
419
420 switch msg := decoded.(type) {
421 case *channelOpenFailureMsg:
422 if err := c.responseMessageReceived(); err != nil {
423 return err
424 }
425 c.mux.chanList.remove(msg.PeersId)
426 c.msg <- msg
427 case *channelOpenConfirmMsg:
428 if err := c.responseMessageReceived(); err != nil {
429 return err
430 }
431 if msg.MaxPacketSize < minPacketLength || msg.MaxPacketSize > 1<<31 {
432 return fmt.Errorf("ssh: invalid MaxPacketSize %d from peer", msg.MaxPacketSize)
433 }
434 c.remoteId = msg.MyId
435 c.maxRemotePayload = msg.MaxPacketSize
436 c.remoteWin.add(msg.MyWindow)
437 c.msg <- msg
438 case *windowAdjustMsg:
439 if !c.remoteWin.add(msg.AdditionalBytes) {
440 return fmt.Errorf("ssh: invalid window update for %d bytes", msg.AdditionalBytes)
441 }
442 case *channelRequestMsg:
443 req := Request{
444 Type: msg.Request,
445 WantReply: msg.WantReply,
446 Payload: msg.RequestSpecificData,
447 ch: c,
448 }
449
450 c.incomingRequests <- &req
451 default:
452 c.msg <- msg
453 }
454 return nil
455}
456
457func (m *mux) newChannel(chanType string, direction channelDirection, extraData []byte) *channel {
458 ch := &channel{
459 remoteWin: window{Cond: newCond()},
460 myWindow: channelWindowSize,
461 pending: newBuffer(),
462 extPending: newBuffer(),
463 direction: direction,
464 incomingRequests: make(chan *Request, chanSize),
465 msg: make(chan interface{}, chanSize),
466 chanType: chanType,
467 extraData: extraData,
468 mux: m,
469 packetPool: make(map[uint32][]byte),
470 }
471 ch.localId = m.chanList.add(ch)
472 return ch
473}
474
475var errUndecided = errors.New("ssh: must Accept or Reject channel")
476var errDecidedAlready = errors.New("ssh: can call Accept or Reject only once")
477
478type extChannel struct {
479 code uint32
480 ch *channel
481}
482
483func (e *extChannel) Write(data []byte) (n int, err error) {
484 return e.ch.WriteExtended(data, e.code)
485}
486
487func (e *extChannel) Read(data []byte) (n int, err error) {
488 return e.ch.ReadExtended(data, e.code)
489}
490
491func (c *channel) Accept() (Channel, <-chan *Request, error) {
492 if c.decided {
493 return nil, nil, errDecidedAlready
494 }
495 c.maxIncomingPayload = channelMaxPacket
496 confirm := channelOpenConfirmMsg{
497 PeersId: c.remoteId,
498 MyId: c.localId,
499 MyWindow: c.myWindow,
500 MaxPacketSize: c.maxIncomingPayload,
501 }
502 c.decided = true
503 if err := c.sendMessage(confirm); err != nil {
504 return nil, nil, err
505 }
506
507 return c, c.incomingRequests, nil
508}
509
510func (ch *channel) Reject(reason RejectionReason, message string) error {
511 if ch.decided {
512 return errDecidedAlready
513 }
514 reject := channelOpenFailureMsg{
515 PeersId: ch.remoteId,
516 Reason: reason,
517 Message: message,
518 Language: "en",
519 }
520 ch.decided = true
521 return ch.sendMessage(reject)
522}
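
// Usage sketch (illustrative, not part of the upstream file): a server-side
// handler must call exactly one of Accept or Reject on each NewChannel, as
// enforced by errUndecided/errDecidedAlready above. serviceChannels and its
// "session"-only policy are hypothetical:
//
//	func serviceChannels(chans <-chan NewChannel) {
//		for newCh := range chans {
//			if newCh.ChannelType() != "session" {
//				newCh.Reject(UnknownChannelType, "unsupported channel type")
//				continue
//			}
//			ch, reqs, err := newCh.Accept()
//			if err != nil {
//				continue
//			}
//			go DiscardRequests(reqs)
//			// ... serve the channel, then:
//			ch.Close()
//		}
//	}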
523
524func (ch *channel) Read(data []byte) (int, error) {
525 if !ch.decided {
526 return 0, errUndecided
527 }
528 return ch.ReadExtended(data, 0)
529}
530
531func (ch *channel) Write(data []byte) (int, error) {
532 if !ch.decided {
533 return 0, errUndecided
534 }
535 return ch.WriteExtended(data, 0)
536}
537
538func (ch *channel) CloseWrite() error {
539 if !ch.decided {
540 return errUndecided
541 }
542 ch.sentEOF = true
543 return ch.sendMessage(channelEOFMsg{
544 PeersId: ch.remoteId})
545}
546
547func (ch *channel) Close() error {
548 if !ch.decided {
549 return errUndecided
550 }
551
552 return ch.sendMessage(channelCloseMsg{
553 PeersId: ch.remoteId})
554}
555
556// Extended returns an io.ReadWriter that sends and receives data on the given
557// SSH extended stream. Such streams are used, for example, for stderr.
558func (ch *channel) Extended(code uint32) io.ReadWriter {
559 if !ch.decided {
560 return nil
561 }
562 return &extChannel{code, ch}
563}
564
565func (ch *channel) Stderr() io.ReadWriter {
566 return ch.Extended(1)
567}
568
569func (ch *channel) SendRequest(name string, wantReply bool, payload []byte) (bool, error) {
570 if !ch.decided {
571 return false, errUndecided
572 }
573
574 if wantReply {
575 ch.sentRequestMu.Lock()
576 defer ch.sentRequestMu.Unlock()
577 }
578
579 msg := channelRequestMsg{
580 PeersId: ch.remoteId,
581 Request: name,
582 WantReply: wantReply,
583 RequestSpecificData: payload,
584 }
585
586 if err := ch.sendMessage(msg); err != nil {
587 return false, err
588 }
589
590 if wantReply {
591 m, ok := (<-ch.msg)
592 if !ok {
593 return false, io.EOF
594 }
595 switch m.(type) {
596 case *channelRequestFailureMsg:
597 return false, nil
598 case *channelRequestSuccessMsg:
599 return true, nil
600 default:
601 return false, fmt.Errorf("ssh: unexpected response to channel request: %#v", m)
602 }
603 }
604
605 return false, nil
606}
607
608// ackRequest either sends an ack or nack to the channel request.
609func (ch *channel) ackRequest(ok bool) error {
610 if !ch.decided {
611 return errUndecided
612 }
613
614 var msg interface{}
615 if !ok {
616 msg = channelRequestFailureMsg{
617 PeersId: ch.remoteId,
618 }
619 } else {
620 msg = channelRequestSuccessMsg{
621 PeersId: ch.remoteId,
622 }
623 }
624 return ch.sendMessage(msg)
625}
626
627func (ch *channel) ChannelType() string {
628 return ch.chanType
629}
630
631func (ch *channel) ExtraData() []byte {
632 return ch.extraData
633}
diff --git a/vendor/golang.org/x/crypto/ssh/cipher.go b/vendor/golang.org/x/crypto/ssh/cipher.go
new file mode 100644
index 0000000..13484ab
--- /dev/null
+++ b/vendor/golang.org/x/crypto/ssh/cipher.go
@@ -0,0 +1,627 @@
1// Copyright 2011 The Go Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package ssh
6
7import (
8 "crypto/aes"
9 "crypto/cipher"
10 "crypto/des"
11 "crypto/rc4"
12 "crypto/subtle"
13 "encoding/binary"
14 "errors"
15 "fmt"
16 "hash"
17 "io"
18 "io/ioutil"
19)
20
21const (
22 packetSizeMultiple = 16 // TODO(huin) this should be determined by the cipher.
23
24	// RFC 4253 section 6.1 requires implementations to support packets with an
25	// uncompressed payload of at least 32768 bytes (plus a few more kilobytes
26	// for header, padding and MAC). It says implementations SHOULD support
27	// larger packets, but sets no firm upper limit.
28 //
29 // OpenSSH caps their maxPacket at 256kB so we choose to do
30 // the same. maxPacket is also used to ensure that uint32
31 // length fields do not overflow, so it should remain well
32 // below 4G.
33 maxPacket = 256 * 1024
34)
35
36// noneCipher implements cipher.Stream and provides no encryption. It is used
37// by the transport before the first key-exchange.
38type noneCipher struct{}
39
40func (c noneCipher) XORKeyStream(dst, src []byte) {
41 copy(dst, src)
42}
43
44func newAESCTR(key, iv []byte) (cipher.Stream, error) {
45 c, err := aes.NewCipher(key)
46 if err != nil {
47 return nil, err
48 }
49 return cipher.NewCTR(c, iv), nil
50}
51
52func newRC4(key, iv []byte) (cipher.Stream, error) {
53 return rc4.NewCipher(key)
54}
55
56type streamCipherMode struct {
57 keySize int
58 ivSize int
59 skip int
60 createFunc func(key, iv []byte) (cipher.Stream, error)
61}
62
63func (c *streamCipherMode) createStream(key, iv []byte) (cipher.Stream, error) {
64 if len(key) < c.keySize {
65 panic("ssh: key length too small for cipher")
66 }
67 if len(iv) < c.ivSize {
68 panic("ssh: iv too small for cipher")
69 }
70
71 stream, err := c.createFunc(key[:c.keySize], iv[:c.ivSize])
72 if err != nil {
73 return nil, err
74 }
75
76 var streamDump []byte
77 if c.skip > 0 {
78 streamDump = make([]byte, 512)
79 }
80
81 for remainingToDump := c.skip; remainingToDump > 0; {
82 dumpThisTime := remainingToDump
83 if dumpThisTime > len(streamDump) {
84 dumpThisTime = len(streamDump)
85 }
86 stream.XORKeyStream(streamDump[:dumpThisTime], streamDump[:dumpThisTime])
87 remainingToDump -= dumpThisTime
88 }
89
90 return stream, nil
91}
92
93// cipherModes documents properties of supported ciphers. Ciphers not included
94// are not supported and will not be negotiated, even if explicitly requested in
95// Config.Ciphers.
96var cipherModes = map[string]*streamCipherMode{
97 // Ciphers from RFC4344, which introduced many CTR-based ciphers. Algorithms
98 // are defined in the order specified in the RFC.
99 "aes128-ctr": {16, aes.BlockSize, 0, newAESCTR},
100 "aes192-ctr": {24, aes.BlockSize, 0, newAESCTR},
101 "aes256-ctr": {32, aes.BlockSize, 0, newAESCTR},
102
103 // Ciphers from RFC4345, which introduces security-improved arcfour ciphers.
104 // They are defined in the order specified in the RFC.
105 "arcfour128": {16, 0, 1536, newRC4},
106 "arcfour256": {32, 0, 1536, newRC4},
107
108 // Cipher defined in RFC 4253, which describes SSH Transport Layer Protocol.
109 // Note that this cipher is not safe, as stated in RFC 4253: "Arcfour (and
110 // RC4) has problems with weak keys, and should be used with caution."
111 // RFC4345 introduces improved versions of Arcfour.
112 "arcfour": {16, 0, 0, newRC4},
113
114 // AES-GCM is not a stream cipher, so it is constructed with a
115 // special case. If we add any more non-stream ciphers, we
116	// should invest in a cleaner way to do this.
117 gcmCipherID: {16, 12, 0, nil},
118
119 // CBC mode is insecure and so is not included in the default config.
120 // (See http://www.isg.rhul.ac.uk/~kp/SandPfinal.pdf). If absolutely
121 // needed, it's possible to specify a custom Config to enable it.
122 // You should expect that an active attacker can recover plaintext if
123 // you do.
124 aes128cbcID: {16, aes.BlockSize, 0, nil},
125
126 // 3des-cbc is insecure and is disabled by default.
127 tripledescbcID: {24, des.BlockSize, 0, nil},
128}
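
// Usage sketch (illustrative, not part of the upstream file): a caller who
// truly needs one of the disabled CBC modes can opt in through the embedded
// Config, accepting the plaintext-recovery risk noted above. User and
// password are placeholders:
//
//	config := &ssh.ClientConfig{
//		User: "user",
//		Auth: []ssh.AuthMethod{ssh.Password("secret")},
//		Config: ssh.Config{
//			Ciphers: []string{"aes128-cbc", "aes128-ctr"},
//		},
//	}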
129
130// prefixLen is the length of the packet prefix that contains the packet length
131// and number of padding bytes.
132const prefixLen = 5
133
134// streamPacketCipher is a packetCipher using a stream cipher.
135type streamPacketCipher struct {
136 mac hash.Hash
137 cipher cipher.Stream
138 etm bool
139
140 // The following members are to avoid per-packet allocations.
141 prefix [prefixLen]byte
142 seqNumBytes [4]byte
143 padding [2 * packetSizeMultiple]byte
144 packetData []byte
145 macResult []byte
146}
147
148// readPacket reads and decrypts a single packet from the reader argument.
149func (s *streamPacketCipher) readPacket(seqNum uint32, r io.Reader) ([]byte, error) {
150 if _, err := io.ReadFull(r, s.prefix[:]); err != nil {
151 return nil, err
152 }
153
154 var encryptedPaddingLength [1]byte
155 if s.mac != nil && s.etm {
156 copy(encryptedPaddingLength[:], s.prefix[4:5])
157 s.cipher.XORKeyStream(s.prefix[4:5], s.prefix[4:5])
158 } else {
159 s.cipher.XORKeyStream(s.prefix[:], s.prefix[:])
160 }
161
162 length := binary.BigEndian.Uint32(s.prefix[0:4])
163 paddingLength := uint32(s.prefix[4])
164
165 var macSize uint32
166 if s.mac != nil {
167 s.mac.Reset()
168 binary.BigEndian.PutUint32(s.seqNumBytes[:], seqNum)
169 s.mac.Write(s.seqNumBytes[:])
170 if s.etm {
171 s.mac.Write(s.prefix[:4])
172 s.mac.Write(encryptedPaddingLength[:])
173 } else {
174 s.mac.Write(s.prefix[:])
175 }
176 macSize = uint32(s.mac.Size())
177 }
178
179 if length <= paddingLength+1 {
180 return nil, errors.New("ssh: invalid packet length, packet too small")
181 }
182
183 if length > maxPacket {
184 return nil, errors.New("ssh: invalid packet length, packet too large")
185 }
186
187 // the maxPacket check above ensures that length-1+macSize
188 // does not overflow.
189 if uint32(cap(s.packetData)) < length-1+macSize {
190 s.packetData = make([]byte, length-1+macSize)
191 } else {
192 s.packetData = s.packetData[:length-1+macSize]
193 }
194
195 if _, err := io.ReadFull(r, s.packetData); err != nil {
196 return nil, err
197 }
198 mac := s.packetData[length-1:]
199 data := s.packetData[:length-1]
200
201 if s.mac != nil && s.etm {
202 s.mac.Write(data)
203 }
204
205 s.cipher.XORKeyStream(data, data)
206
207 if s.mac != nil {
208 if !s.etm {
209 s.mac.Write(data)
210 }
211 s.macResult = s.mac.Sum(s.macResult[:0])
212 if subtle.ConstantTimeCompare(s.macResult, mac) != 1 {
213 return nil, errors.New("ssh: MAC failure")
214 }
215 }
216
217 return s.packetData[:length-paddingLength-1], nil
218}
219
220// writePacket encrypts and sends a packet of data to the writer argument
221func (s *streamPacketCipher) writePacket(seqNum uint32, w io.Writer, rand io.Reader, packet []byte) error {
222 if len(packet) > maxPacket {
223 return errors.New("ssh: packet too large")
224 }
225
226 aadlen := 0
227 if s.mac != nil && s.etm {
228 // packet length is not encrypted for EtM modes
229 aadlen = 4
230 }
231
232 paddingLength := packetSizeMultiple - (prefixLen+len(packet)-aadlen)%packetSizeMultiple
233 if paddingLength < 4 {
234 paddingLength += packetSizeMultiple
235 }
236
237 length := len(packet) + 1 + paddingLength
238 binary.BigEndian.PutUint32(s.prefix[:], uint32(length))
239 s.prefix[4] = byte(paddingLength)
240 padding := s.padding[:paddingLength]
241 if _, err := io.ReadFull(rand, padding); err != nil {
242 return err
243 }
244
245 if s.mac != nil {
246 s.mac.Reset()
247 binary.BigEndian.PutUint32(s.seqNumBytes[:], seqNum)
248 s.mac.Write(s.seqNumBytes[:])
249
250 if s.etm {
251 // For EtM algorithms, the packet length must stay unencrypted,
252 // but the following data (padding length) must be encrypted
253 s.cipher.XORKeyStream(s.prefix[4:5], s.prefix[4:5])
254 }
255
256 s.mac.Write(s.prefix[:])
257
258 if !s.etm {
259			// For non-EtM algorithms, the MAC is computed over the unencrypted data
260 s.mac.Write(packet)
261 s.mac.Write(padding)
262 }
263 }
264
265 if !(s.mac != nil && s.etm) {
266 // For EtM algorithms, the padding length has already been encrypted
267 // and the packet length must remain unencrypted
268 s.cipher.XORKeyStream(s.prefix[:], s.prefix[:])
269 }
270
271 s.cipher.XORKeyStream(packet, packet)
272 s.cipher.XORKeyStream(padding, padding)
273
274 if s.mac != nil && s.etm {
275 // For EtM algorithms, packet and padding must be encrypted
276 s.mac.Write(packet)
277 s.mac.Write(padding)
278 }
279
280 if _, err := w.Write(s.prefix[:]); err != nil {
281 return err
282 }
283 if _, err := w.Write(packet); err != nil {
284 return err
285 }
286 if _, err := w.Write(padding); err != nil {
287 return err
288 }
289
290 if s.mac != nil {
291 s.macResult = s.mac.Sum(s.macResult[:0])
292 if _, err := w.Write(s.macResult); err != nil {
293 return err
294 }
295 }
296
297 return nil
298}
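
// Worked example (illustrative): with no EtM and packetSizeMultiple = 16, a
// 12-byte payload gets paddingLength = 16 - (5+12)%16 = 15 (>= 4), so
// length = 12 + 1 + 15 = 28, and the cipher processes 4 (length field) +
// 28 = 32 bytes, a multiple of the block size, followed by the MAC if one
// is configured.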
299
300type gcmCipher struct {
301 aead cipher.AEAD
302 prefix [4]byte
303 iv []byte
304 buf []byte
305}
306
307func newGCMCipher(iv, key, macKey []byte) (packetCipher, error) {
308 c, err := aes.NewCipher(key)
309 if err != nil {
310 return nil, err
311 }
312
313 aead, err := cipher.NewGCM(c)
314 if err != nil {
315 return nil, err
316 }
317
318 return &gcmCipher{
319 aead: aead,
320 iv: iv,
321 }, nil
322}
323
324const gcmTagSize = 16
325
326func (c *gcmCipher) writePacket(seqNum uint32, w io.Writer, rand io.Reader, packet []byte) error {
327 // Pad out to multiple of 16 bytes. This is different from the
328 // stream cipher because that encrypts the length too.
329 padding := byte(packetSizeMultiple - (1+len(packet))%packetSizeMultiple)
330 if padding < 4 {
331 padding += packetSizeMultiple
332 }
333
334 length := uint32(len(packet) + int(padding) + 1)
335 binary.BigEndian.PutUint32(c.prefix[:], length)
336 if _, err := w.Write(c.prefix[:]); err != nil {
337 return err
338 }
339
340 if cap(c.buf) < int(length) {
341 c.buf = make([]byte, length)
342 } else {
343 c.buf = c.buf[:length]
344 }
345
346 c.buf[0] = padding
347 copy(c.buf[1:], packet)
348 if _, err := io.ReadFull(rand, c.buf[1+len(packet):]); err != nil {
349 return err
350 }
351 c.buf = c.aead.Seal(c.buf[:0], c.iv, c.buf, c.prefix[:])
352 if _, err := w.Write(c.buf); err != nil {
353 return err
354 }
355 c.incIV()
356
357 return nil
358}
359
360func (c *gcmCipher) incIV() {
361 for i := 4 + 7; i >= 4; i-- {
362 c.iv[i]++
363 if c.iv[i] != 0 {
364 break
365 }
366 }
367}
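
// Note (illustrative): per RFC 5647 the 12-byte GCM nonce is a 4-byte fixed
// field (iv[0:4]) followed by an 8-byte big-endian invocation counter
// (iv[4:12]). incIV advances only the counter, carrying between bytes, so
// ...00 ff becomes ...01 00 while the fixed field is untouched.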
368
369func (c *gcmCipher) readPacket(seqNum uint32, r io.Reader) ([]byte, error) {
370 if _, err := io.ReadFull(r, c.prefix[:]); err != nil {
371 return nil, err
372 }
373 length := binary.BigEndian.Uint32(c.prefix[:])
374 if length > maxPacket {
375		return nil, errors.New("ssh: max packet length exceeded")
376 }
377
378 if cap(c.buf) < int(length+gcmTagSize) {
379 c.buf = make([]byte, length+gcmTagSize)
380 } else {
381 c.buf = c.buf[:length+gcmTagSize]
382 }
383
384 if _, err := io.ReadFull(r, c.buf); err != nil {
385 return nil, err
386 }
387
388 plain, err := c.aead.Open(c.buf[:0], c.iv, c.buf, c.prefix[:])
389 if err != nil {
390 return nil, err
391 }
392 c.incIV()
393
394 padding := plain[0]
395 if padding < 4 || padding >= 20 {
396 return nil, fmt.Errorf("ssh: illegal padding %d", padding)
397 }
398
399 if int(padding+1) >= len(plain) {
400 return nil, fmt.Errorf("ssh: padding %d too large", padding)
401 }
402 plain = plain[1 : length-uint32(padding)]
403 return plain, nil
404}
405
406// cbcCipher implements the CBC-mode ciphers (e.g. aes128-cbc) defined in RFC 4253 section 6.3.
407type cbcCipher struct {
408 mac hash.Hash
409 macSize uint32
410 decrypter cipher.BlockMode
411 encrypter cipher.BlockMode
412
413 // The following members are to avoid per-packet allocations.
414 seqNumBytes [4]byte
415 packetData []byte
416 macResult []byte
417
418 // Amount of data we should still read to hide which
419 // verification error triggered.
420 oracleCamouflage uint32
421}
422
423func newCBCCipher(c cipher.Block, iv, key, macKey []byte, algs directionAlgorithms) (packetCipher, error) {
424 cbc := &cbcCipher{
425 mac: macModes[algs.MAC].new(macKey),
426 decrypter: cipher.NewCBCDecrypter(c, iv),
427 encrypter: cipher.NewCBCEncrypter(c, iv),
428 packetData: make([]byte, 1024),
429 }
430 if cbc.mac != nil {
431 cbc.macSize = uint32(cbc.mac.Size())
432 }
433
434 return cbc, nil
435}
436
437func newAESCBCCipher(iv, key, macKey []byte, algs directionAlgorithms) (packetCipher, error) {
438 c, err := aes.NewCipher(key)
439 if err != nil {
440 return nil, err
441 }
442
443 cbc, err := newCBCCipher(c, iv, key, macKey, algs)
444 if err != nil {
445 return nil, err
446 }
447
448 return cbc, nil
449}
450
451func newTripleDESCBCCipher(iv, key, macKey []byte, algs directionAlgorithms) (packetCipher, error) {
452 c, err := des.NewTripleDESCipher(key)
453 if err != nil {
454 return nil, err
455 }
456
457 cbc, err := newCBCCipher(c, iv, key, macKey, algs)
458 if err != nil {
459 return nil, err
460 }
461
462 return cbc, nil
463}
464
465func maxUInt32(a, b int) uint32 {
466 if a > b {
467 return uint32(a)
468 }
469 return uint32(b)
470}
471
472const (
473 cbcMinPacketSizeMultiple = 8
474 cbcMinPacketSize = 16
475 cbcMinPaddingSize = 4
476)
477
478// cbcError represents a verification error that may leak information.
479type cbcError string
480
481func (e cbcError) Error() string { return string(e) }
482
483func (c *cbcCipher) readPacket(seqNum uint32, r io.Reader) ([]byte, error) {
484 p, err := c.readPacketLeaky(seqNum, r)
485 if err != nil {
486 if _, ok := err.(cbcError); ok {
487 // Verification error: read a fixed amount of
488 // data, to make distinguishing between
489 // failing MAC and failing length check more
490 // difficult.
491 io.CopyN(ioutil.Discard, r, int64(c.oracleCamouflage))
492 }
493 }
494 return p, err
495}
496
497func (c *cbcCipher) readPacketLeaky(seqNum uint32, r io.Reader) ([]byte, error) {
498 blockSize := c.decrypter.BlockSize()
499
500	// Read the first block, which in the case of block ciphers also contains
501	// some of the payload/padding; it is decrypted in place below.
502	// firstBlockLength is how many payload/padding bytes this first read covers.
503 firstBlockLength := uint32((prefixLen + blockSize - 1) / blockSize * blockSize)
504 firstBlock := c.packetData[:firstBlockLength]
505 if _, err := io.ReadFull(r, firstBlock); err != nil {
506 return nil, err
507 }
508
509 c.oracleCamouflage = maxPacket + 4 + c.macSize - firstBlockLength
510
511 c.decrypter.CryptBlocks(firstBlock, firstBlock)
512 length := binary.BigEndian.Uint32(firstBlock[:4])
513 if length > maxPacket {
514 return nil, cbcError("ssh: packet too large")
515 }
516 if length+4 < maxUInt32(cbcMinPacketSize, blockSize) {
517 // The minimum size of a packet is 16 (or the cipher block size, whichever
518 // is larger) bytes.
519 return nil, cbcError("ssh: packet too small")
520 }
521 // The length of the packet (including the length field but not the MAC) must
522 // be a multiple of the block size or 8, whichever is larger.
523 if (length+4)%maxUInt32(cbcMinPacketSizeMultiple, blockSize) != 0 {
524 return nil, cbcError("ssh: invalid packet length multiple")
525 }
526
527 paddingLength := uint32(firstBlock[4])
528 if paddingLength < cbcMinPaddingSize || length <= paddingLength+1 {
529 return nil, cbcError("ssh: invalid packet length")
530 }
531
532 // Positions within the c.packetData buffer:
533 macStart := 4 + length
534 paddingStart := macStart - paddingLength
535
536 // Entire packet size, starting before length, ending at end of mac.
537 entirePacketSize := macStart + c.macSize
538
539 // Ensure c.packetData is large enough for the entire packet data.
540 if uint32(cap(c.packetData)) < entirePacketSize {
541 // Still need to upsize and copy, but this should be rare at runtime, only
542 // on upsizing the packetData buffer.
543 c.packetData = make([]byte, entirePacketSize)
544 copy(c.packetData, firstBlock)
545 } else {
546 c.packetData = c.packetData[:entirePacketSize]
547 }
548
549 if n, err := io.ReadFull(r, c.packetData[firstBlockLength:]); err != nil {
550 return nil, err
551 } else {
552 c.oracleCamouflage -= uint32(n)
553 }
554
555 remainingCrypted := c.packetData[firstBlockLength:macStart]
556 c.decrypter.CryptBlocks(remainingCrypted, remainingCrypted)
557
558 mac := c.packetData[macStart:]
559 if c.mac != nil {
560 c.mac.Reset()
561 binary.BigEndian.PutUint32(c.seqNumBytes[:], seqNum)
562 c.mac.Write(c.seqNumBytes[:])
563 c.mac.Write(c.packetData[:macStart])
564 c.macResult = c.mac.Sum(c.macResult[:0])
565 if subtle.ConstantTimeCompare(c.macResult, mac) != 1 {
566 return nil, cbcError("ssh: MAC failure")
567 }
568 }
569
570 return c.packetData[prefixLen:paddingStart], nil
571}
572
573func (c *cbcCipher) writePacket(seqNum uint32, w io.Writer, rand io.Reader, packet []byte) error {
574 effectiveBlockSize := maxUInt32(cbcMinPacketSizeMultiple, c.encrypter.BlockSize())
575
576 // Length of encrypted portion of the packet (header, payload, padding).
577 // Enforce minimum padding and packet size.
578 encLength := maxUInt32(prefixLen+len(packet)+cbcMinPaddingSize, cbcMinPaddingSize)
579 // Enforce block size.
580 encLength = (encLength + effectiveBlockSize - 1) / effectiveBlockSize * effectiveBlockSize
581
582 length := encLength - 4
583 paddingLength := int(length) - (1 + len(packet))
584
585 // Overall buffer contains: header, payload, padding, mac.
586 // Space for the MAC is reserved in the capacity but not the slice length.
587 bufferSize := encLength + c.macSize
588 if uint32(cap(c.packetData)) < bufferSize {
589 c.packetData = make([]byte, encLength, bufferSize)
590 } else {
591 c.packetData = c.packetData[:encLength]
592 }
593
594 p := c.packetData
595
596 // Packet header.
597 binary.BigEndian.PutUint32(p, length)
598 p = p[4:]
599 p[0] = byte(paddingLength)
600
601 // Payload.
602 p = p[1:]
603 copy(p, packet)
604
605 // Padding.
606 p = p[len(packet):]
607 if _, err := io.ReadFull(rand, p); err != nil {
608 return err
609 }
610
611 if c.mac != nil {
612 c.mac.Reset()
613 binary.BigEndian.PutUint32(c.seqNumBytes[:], seqNum)
614 c.mac.Write(c.seqNumBytes[:])
615 c.mac.Write(c.packetData)
616 // The MAC is now appended into the capacity reserved for it earlier.
617 c.packetData = c.mac.Sum(c.packetData)
618 }
619
620 c.encrypter.CryptBlocks(c.packetData[:encLength], c.packetData[:encLength])
621
622 if _, err := w.Write(c.packetData); err != nil {
623 return err
624 }
625
626 return nil
627}
diff --git a/vendor/golang.org/x/crypto/ssh/client.go b/vendor/golang.org/x/crypto/ssh/client.go
new file mode 100644
index 0000000..c97f297
--- /dev/null
+++ b/vendor/golang.org/x/crypto/ssh/client.go
@@ -0,0 +1,211 @@
1// Copyright 2011 The Go Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package ssh
6
7import (
8 "errors"
9 "fmt"
10 "net"
11 "sync"
12 "time"
13)
14
15// Client implements a traditional SSH client that supports shells,
16// subprocesses, port forwarding and tunneled dialing.
17type Client struct {
18 Conn
19
20 forwards forwardList // forwarded tcpip connections from the remote side
21 mu sync.Mutex
22 channelHandlers map[string]chan NewChannel
23}
24
25// HandleChannelOpen returns a channel on which NewChannel requests
26// for the given type are sent. If the type is already being handled,
27// nil is returned. The channel is closed when the connection is closed.
28func (c *Client) HandleChannelOpen(channelType string) <-chan NewChannel {
29 c.mu.Lock()
30 defer c.mu.Unlock()
31 if c.channelHandlers == nil {
32 // The SSH channel has been closed.
33 c := make(chan NewChannel)
34 close(c)
35 return c
36 }
37
38 ch := c.channelHandlers[channelType]
39 if ch != nil {
40 return nil
41 }
42
43 ch = make(chan NewChannel, chanSize)
44 c.channelHandlers[channelType] = ch
45 return ch
46}
47
48// NewClient creates a Client on top of the given connection.
49func NewClient(c Conn, chans <-chan NewChannel, reqs <-chan *Request) *Client {
50 conn := &Client{
51 Conn: c,
52 channelHandlers: make(map[string]chan NewChannel, 1),
53 }
54
55 go conn.handleGlobalRequests(reqs)
56 go conn.handleChannelOpens(chans)
57 go func() {
58 conn.Wait()
59 conn.forwards.closeAll()
60 }()
61 go conn.forwards.handleChannels(conn.HandleChannelOpen("forwarded-tcpip"))
62 return conn
63}
64
65// NewClientConn establishes an authenticated SSH connection using c
66// as the underlying transport. The Request and NewChannel channels
67// must be serviced or the connection will hang.
68func NewClientConn(c net.Conn, addr string, config *ClientConfig) (Conn, <-chan NewChannel, <-chan *Request, error) {
69 fullConf := *config
70 fullConf.SetDefaults()
71 conn := &connection{
72 sshConn: sshConn{conn: c},
73 }
74
75 if err := conn.clientHandshake(addr, &fullConf); err != nil {
76 c.Close()
77 return nil, nil, nil, fmt.Errorf("ssh: handshake failed: %v", err)
78 }
79 conn.mux = newMux(conn.transport)
80 return conn, conn.mux.incomingChannels, conn.mux.incomingRequests, nil
81}
82
83// clientHandshake performs the client side key exchange. See RFC 4253 Section
84// 7.
85func (c *connection) clientHandshake(dialAddress string, config *ClientConfig) error {
86 if config.ClientVersion != "" {
87 c.clientVersion = []byte(config.ClientVersion)
88 } else {
89 c.clientVersion = []byte(packageVersion)
90 }
91 var err error
92 c.serverVersion, err = exchangeVersions(c.sshConn.conn, c.clientVersion)
93 if err != nil {
94 return err
95 }
96
97 c.transport = newClientTransport(
98 newTransport(c.sshConn.conn, config.Rand, true /* is client */),
99 c.clientVersion, c.serverVersion, config, dialAddress, c.sshConn.RemoteAddr())
100 if err := c.transport.waitSession(); err != nil {
101 return err
102 }
103
104 c.sessionID = c.transport.getSessionID()
105 return c.clientAuthenticate(config)
106}
107
108// verifyHostKeySignature verifies the host key obtained in the key
109// exchange.
110func verifyHostKeySignature(hostKey PublicKey, result *kexResult) error {
111 sig, rest, ok := parseSignatureBody(result.Signature)
112 if len(rest) > 0 || !ok {
113 return errors.New("ssh: signature parse error")
114 }
115
116 return hostKey.Verify(result.H, sig)
117}
118
119// NewSession opens a new Session for this client. (A session is a remote
120// execution of a program.)
121func (c *Client) NewSession() (*Session, error) {
122 ch, in, err := c.OpenChannel("session", nil)
123 if err != nil {
124 return nil, err
125 }
126 return newSession(ch, in)
127}
128
129func (c *Client) handleGlobalRequests(incoming <-chan *Request) {
130 for r := range incoming {
131 // This handles keepalive messages and matches
132 // the behaviour of OpenSSH.
133 r.Reply(false, nil)
134 }
135}
136
137// handleChannelOpens handles channel open messages from the remote side.
138func (c *Client) handleChannelOpens(in <-chan NewChannel) {
139 for ch := range in {
140 c.mu.Lock()
141 handler := c.channelHandlers[ch.ChannelType()]
142 c.mu.Unlock()
143
144 if handler != nil {
145 handler <- ch
146 } else {
147 ch.Reject(UnknownChannelType, fmt.Sprintf("unknown channel type: %v", ch.ChannelType()))
148 }
149 }
150
151 c.mu.Lock()
152 for _, ch := range c.channelHandlers {
153 close(ch)
154 }
155 c.channelHandlers = nil
156 c.mu.Unlock()
157}
158
159// Dial starts a client connection to the given SSH server. It is a
160// convenience function that connects to the given network address,
161// initiates the SSH handshake, and then sets up a Client. For access
162// to incoming channels and requests, use net.Dial with NewClientConn
163// instead.
164func Dial(network, addr string, config *ClientConfig) (*Client, error) {
165 conn, err := net.DialTimeout(network, addr, config.Timeout)
166 if err != nil {
167 return nil, err
168 }
169 c, chans, reqs, err := NewClientConn(conn, addr, config)
170 if err != nil {
171 return nil, err
172 }
173 return NewClient(c, chans, reqs), nil
174}
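
// Usage sketch (illustrative, not part of the upstream file; host and
// credentials are placeholders, and leaving HostKeyCallback nil accepts any
// host key, which is insecure). Assumes imports of log and time:
//
//	config := &ssh.ClientConfig{
//		User:    "user",
//		Auth:    []ssh.AuthMethod{ssh.Password("secret")},
//		Timeout: 15 * time.Second,
//	}
//	client, err := ssh.Dial("tcp", "host.example.com:22", config)
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer client.Close()
//
//	session, err := client.NewSession()
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer session.Close()
//	out, err := session.Output("uname -a")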
175
176// A ClientConfig structure is used to configure a Client. It must not be
177// modified after having been passed to an SSH function.
178type ClientConfig struct {
179 // Config contains configuration that is shared between clients and
180 // servers.
181 Config
182
183 // User contains the username to authenticate as.
184 User string
185
186 // Auth contains possible authentication methods to use with the
187 // server. Only the first instance of a particular RFC 4252 method will
188 // be used during authentication.
189 Auth []AuthMethod
190
191 // HostKeyCallback, if not nil, is called during the cryptographic
192 // handshake to validate the server's host key. A nil HostKeyCallback
193 // implies that all host keys are accepted.
194 HostKeyCallback func(hostname string, remote net.Addr, key PublicKey) error
195
196 // ClientVersion contains the version identification string that will
197 // be used for the connection. If empty, a reasonable default is used.
198 ClientVersion string
199
200 // HostKeyAlgorithms lists the key types that the client will
201 // accept from the server as host key, in order of
202 // preference. If empty, a reasonable default is used. Any
203 // string returned from PublicKey.Type method may be used, or
204 // any of the CertAlgoXxxx and KeyAlgoXxxx constants.
205 HostKeyAlgorithms []string
206
207	// Timeout is the maximum amount of time to wait for the TCP connection to be established.
208 //
209 // A Timeout of zero means no timeout.
210 Timeout time.Duration
211}
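
// Usage sketch (illustrative, not part of the upstream file): pinning a
// single known host key by comparing wire encodings. trusted is a PublicKey
// obtained out of band, e.g. from ParseAuthorizedKey, and the bytes, fmt
// and net imports are assumed:
//
//	config.HostKeyCallback = func(hostname string, remote net.Addr, key ssh.PublicKey) error {
//		if bytes.Equal(key.Marshal(), trusted.Marshal()) {
//			return nil
//		}
//		return fmt.Errorf("ssh: unexpected host key for %s", hostname)
//	}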
diff --git a/vendor/golang.org/x/crypto/ssh/client_auth.go b/vendor/golang.org/x/crypto/ssh/client_auth.go
new file mode 100644
index 0000000..fd1ec5d
--- /dev/null
+++ b/vendor/golang.org/x/crypto/ssh/client_auth.go
@@ -0,0 +1,475 @@
1// Copyright 2011 The Go Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package ssh
6
7import (
8 "bytes"
9 "errors"
10 "fmt"
11 "io"
12)
13
14// clientAuthenticate authenticates with the remote server. See RFC 4252.
15func (c *connection) clientAuthenticate(config *ClientConfig) error {
16 // initiate user auth session
17 if err := c.transport.writePacket(Marshal(&serviceRequestMsg{serviceUserAuth})); err != nil {
18 return err
19 }
20 packet, err := c.transport.readPacket()
21 if err != nil {
22 return err
23 }
24 var serviceAccept serviceAcceptMsg
25 if err := Unmarshal(packet, &serviceAccept); err != nil {
26 return err
27 }
28
29	// During the authentication phase the client first attempts the "none" method,
30	// then any untried methods suggested by the server.
31 tried := make(map[string]bool)
32 var lastMethods []string
33
34 sessionID := c.transport.getSessionID()
35 for auth := AuthMethod(new(noneAuth)); auth != nil; {
36 ok, methods, err := auth.auth(sessionID, config.User, c.transport, config.Rand)
37 if err != nil {
38 return err
39 }
40 if ok {
41 // success
42 return nil
43 }
44 tried[auth.method()] = true
45 if methods == nil {
46 methods = lastMethods
47 }
48 lastMethods = methods
49
50 auth = nil
51
52 findNext:
53 for _, a := range config.Auth {
54 candidateMethod := a.method()
55 if tried[candidateMethod] {
56 continue
57 }
58 for _, meth := range methods {
59 if meth == candidateMethod {
60 auth = a
61 break findNext
62 }
63 }
64 }
65 }
66 return fmt.Errorf("ssh: unable to authenticate, attempted methods %v, no supported methods remain", keys(tried))
67}
68
69func keys(m map[string]bool) []string {
70 s := make([]string, 0, len(m))
71
72 for key := range m {
73 s = append(s, key)
74 }
75 return s
76}
77
78// An AuthMethod represents an instance of an RFC 4252 authentication method.
79type AuthMethod interface {
80 // auth authenticates user over transport t.
81 // Returns true if authentication is successful.
82 // If authentication is not successful, a []string of alternative
83 // method names is returned. If the slice is nil, it will be ignored
84 // and the previous set of possible methods will be reused.
85 auth(session []byte, user string, p packetConn, rand io.Reader) (bool, []string, error)
86
87 // method returns the RFC 4252 method name.
88 method() string
89}
90
91// "none" authentication, RFC 4252 section 5.2.
92type noneAuth int
93
94func (n *noneAuth) auth(session []byte, user string, c packetConn, rand io.Reader) (bool, []string, error) {
95 if err := c.writePacket(Marshal(&userAuthRequestMsg{
96 User: user,
97 Service: serviceSSH,
98 Method: "none",
99 })); err != nil {
100 return false, nil, err
101 }
102
103 return handleAuthResponse(c)
104}
105
106func (n *noneAuth) method() string {
107 return "none"
108}
109
110// passwordCallback is an AuthMethod that fetches the password through
111// a function call, e.g. by prompting the user.
112type passwordCallback func() (password string, err error)
113
114func (cb passwordCallback) auth(session []byte, user string, c packetConn, rand io.Reader) (bool, []string, error) {
115 type passwordAuthMsg struct {
116 User string `sshtype:"50"`
117 Service string
118 Method string
119 Reply bool
120 Password string
121 }
122
123 pw, err := cb()
124 // REVIEW NOTE: is there a need to support skipping a password attempt?
125 // The program may only find out that the user doesn't have a password
126 // when prompting.
127 if err != nil {
128 return false, nil, err
129 }
130
131 if err := c.writePacket(Marshal(&passwordAuthMsg{
132 User: user,
133 Service: serviceSSH,
134 Method: cb.method(),
135 Reply: false,
136 Password: pw,
137 })); err != nil {
138 return false, nil, err
139 }
140
141 return handleAuthResponse(c)
142}
143
144func (cb passwordCallback) method() string {
145 return "password"
146}
147
148// Password returns an AuthMethod using the given password.
149func Password(secret string) AuthMethod {
150 return passwordCallback(func() (string, error) { return secret, nil })
151}
152
153// PasswordCallback returns an AuthMethod that uses a callback for
154// fetching a password.
155func PasswordCallback(prompt func() (secret string, err error)) AuthMethod {
156 return passwordCallback(prompt)
157}
158
159type publickeyAuthMsg struct {
160 User string `sshtype:"50"`
161 Service string
162 Method string
163	// HasSig indicates to the receiver that the auth request is signed and
164	// should be used for authentication of the request.
165 HasSig bool
166 Algoname string
167 PubKey []byte
168 // Sig is tagged with "rest" so Marshal will exclude it during
169 // validateKey
170 Sig []byte `ssh:"rest"`
171}
172
173// publicKeyCallback is an AuthMethod that uses a set of key
174// pairs for authentication.
175type publicKeyCallback func() ([]Signer, error)
176
177func (cb publicKeyCallback) method() string {
178 return "publickey"
179}
180
181func (cb publicKeyCallback) auth(session []byte, user string, c packetConn, rand io.Reader) (bool, []string, error) {
182 // Authentication is performed in two stages. The first stage sends an
183 // enquiry to test if each key is acceptable to the remote. The second
184 // stage attempts to authenticate with the valid keys obtained in the
185 // first stage.
186
187 signers, err := cb()
188 if err != nil {
189 return false, nil, err
190 }
191 var validKeys []Signer
192 for _, signer := range signers {
193 if ok, err := validateKey(signer.PublicKey(), user, c); ok {
194 validKeys = append(validKeys, signer)
195 } else {
196 if err != nil {
197 return false, nil, err
198 }
199 }
200 }
201
202 // methods that may continue if this auth is not successful.
203 var methods []string
204 for _, signer := range validKeys {
205 pub := signer.PublicKey()
206
207 pubKey := pub.Marshal()
208 sign, err := signer.Sign(rand, buildDataSignedForAuth(session, userAuthRequestMsg{
209 User: user,
210 Service: serviceSSH,
211 Method: cb.method(),
212 }, []byte(pub.Type()), pubKey))
213 if err != nil {
214 return false, nil, err
215 }
216
217 // manually wrap the serialized signature in a string
218 s := Marshal(sign)
219 sig := make([]byte, stringLength(len(s)))
220 marshalString(sig, s)
221 msg := publickeyAuthMsg{
222 User: user,
223 Service: serviceSSH,
224 Method: cb.method(),
225 HasSig: true,
226 Algoname: pub.Type(),
227 PubKey: pubKey,
228 Sig: sig,
229 }
230 p := Marshal(&msg)
231 if err := c.writePacket(p); err != nil {
232 return false, nil, err
233 }
234 var success bool
235 success, methods, err = handleAuthResponse(c)
236 if err != nil {
237 return false, nil, err
238 }
239 if success {
240 return success, methods, err
241 }
242 }
243 return false, methods, nil
244}
245
246// validateKey validates the key provided is acceptable to the server.
247func validateKey(key PublicKey, user string, c packetConn) (bool, error) {
248 pubKey := key.Marshal()
249 msg := publickeyAuthMsg{
250 User: user,
251 Service: serviceSSH,
252 Method: "publickey",
253 HasSig: false,
254 Algoname: key.Type(),
255 PubKey: pubKey,
256 }
257 if err := c.writePacket(Marshal(&msg)); err != nil {
258 return false, err
259 }
260
261 return confirmKeyAck(key, c)
262}
263
264func confirmKeyAck(key PublicKey, c packetConn) (bool, error) {
265 pubKey := key.Marshal()
266 algoname := key.Type()
267
268 for {
269 packet, err := c.readPacket()
270 if err != nil {
271 return false, err
272 }
273 switch packet[0] {
274 case msgUserAuthBanner:
275 // TODO(gpaul): add callback to present the banner to the user
276 case msgUserAuthPubKeyOk:
277 var msg userAuthPubKeyOkMsg
278 if err := Unmarshal(packet, &msg); err != nil {
279 return false, err
280 }
281 if msg.Algo != algoname || !bytes.Equal(msg.PubKey, pubKey) {
282 return false, nil
283 }
284 return true, nil
285 case msgUserAuthFailure:
286 return false, nil
287 default:
288 return false, unexpectedMessageError(msgUserAuthSuccess, packet[0])
289 }
290 }
291}
292
293// PublicKeys returns an AuthMethod that uses the given key
294// pairs.
295func PublicKeys(signers ...Signer) AuthMethod {
296 return publicKeyCallback(func() ([]Signer, error) { return signers, nil })
297}
298
299// PublicKeysCallback returns an AuthMethod that runs the given
300// function to obtain a list of key pairs.
301func PublicKeysCallback(getSigners func() (signers []Signer, err error)) AuthMethod {
302 return publicKeyCallback(getSigners)
303}
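
// Usage sketch (illustrative, not part of the upstream file; the key path
// is a placeholder and ioutil/log are assumed to be imported):
//
//	pemBytes, err := ioutil.ReadFile("/home/user/.ssh/id_rsa")
//	if err != nil {
//		log.Fatal(err)
//	}
//	signer, err := ssh.ParsePrivateKey(pemBytes)
//	if err != nil {
//		log.Fatal(err)
//	}
//	config := &ssh.ClientConfig{
//		User: "user",
//		Auth: []ssh.AuthMethod{ssh.PublicKeys(signer)},
//	}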
304
305// handleAuthResponse returns whether the preceding authentication request succeeded,
306// along with a list of remaining authentication methods to try next, and
307// an error if an unexpected response was received.
308func handleAuthResponse(c packetConn) (bool, []string, error) {
309 for {
310 packet, err := c.readPacket()
311 if err != nil {
312 return false, nil, err
313 }
314
315 switch packet[0] {
316 case msgUserAuthBanner:
317 // TODO: add callback to present the banner to the user
318 case msgUserAuthFailure:
319 var msg userAuthFailureMsg
320 if err := Unmarshal(packet, &msg); err != nil {
321 return false, nil, err
322 }
323 return false, msg.Methods, nil
324 case msgUserAuthSuccess:
325 return true, nil, nil
326 default:
327 return false, nil, unexpectedMessageError(msgUserAuthSuccess, packet[0])
328 }
329 }
330}
331
332// KeyboardInteractiveChallenge should print questions, optionally
333// disabling echoing (e.g. for passwords), and return all the answers.
334// Challenge may be called multiple times in a single session. After
335// successful authentication, the server may send a challenge with no
336// questions, for which the user and instruction messages should be
337// printed. RFC 4256 section 3.3 details how the UI should behave for
338// both CLI and GUI environments.
339type KeyboardInteractiveChallenge func(user, instruction string, questions []string, echos []bool) (answers []string, err error)
340
341// KeyboardInteractive returns an AuthMethod using a prompt/response
342// sequence controlled by the server.
343func KeyboardInteractive(challenge KeyboardInteractiveChallenge) AuthMethod {
344 return challenge
345}
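
// Usage sketch (illustrative, not part of the upstream file): a minimal
// challenge handler that answers every prompt with the same placeholder
// secret; a real client would display user and instruction and respect the
// echos flags when reading input:
//
//	auth := ssh.KeyboardInteractive(func(user, instruction string, questions []string, echos []bool) ([]string, error) {
//		answers := make([]string, len(questions))
//		for i := range questions {
//			answers[i] = "secret"
//		}
//		return answers, nil
//	})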
346
347func (cb KeyboardInteractiveChallenge) method() string {
348 return "keyboard-interactive"
349}
350
351func (cb KeyboardInteractiveChallenge) auth(session []byte, user string, c packetConn, rand io.Reader) (bool, []string, error) {
352 type initiateMsg struct {
353 User string `sshtype:"50"`
354 Service string
355 Method string
356 Language string
357 Submethods string
358 }
359
360 if err := c.writePacket(Marshal(&initiateMsg{
361 User: user,
362 Service: serviceSSH,
363 Method: "keyboard-interactive",
364 })); err != nil {
365 return false, nil, err
366 }
367
368 for {
369 packet, err := c.readPacket()
370 if err != nil {
371 return false, nil, err
372 }
373
374		// like handleAuthResponse, but with fewer options.
375 switch packet[0] {
376 case msgUserAuthBanner:
377 // TODO: Print banners during userauth.
378 continue
379 case msgUserAuthInfoRequest:
380 // OK
381 case msgUserAuthFailure:
382 var msg userAuthFailureMsg
383 if err := Unmarshal(packet, &msg); err != nil {
384 return false, nil, err
385 }
386 return false, msg.Methods, nil
387 case msgUserAuthSuccess:
388 return true, nil, nil
389 default:
390 return false, nil, unexpectedMessageError(msgUserAuthInfoRequest, packet[0])
391 }
392
393 var msg userAuthInfoRequestMsg
394 if err := Unmarshal(packet, &msg); err != nil {
395 return false, nil, err
396 }
397
398 // Manually unpack the prompt/echo pairs.
399 rest := msg.Prompts
400 var prompts []string
401 var echos []bool
402 for i := 0; i < int(msg.NumPrompts); i++ {
403 prompt, r, ok := parseString(rest)
404 if !ok || len(r) == 0 {
405 return false, nil, errors.New("ssh: prompt format error")
406 }
407 prompts = append(prompts, string(prompt))
408 echos = append(echos, r[0] != 0)
409 rest = r[1:]
410 }
411
412 if len(rest) != 0 {
413 return false, nil, errors.New("ssh: extra data following keyboard-interactive pairs")
414 }
415
416 answers, err := cb(msg.User, msg.Instruction, prompts, echos)
417 if err != nil {
418 return false, nil, err
419 }
420
421 if len(answers) != len(prompts) {
422 return false, nil, errors.New("ssh: not enough answers from keyboard-interactive callback")
423 }
424 responseLength := 1 + 4
425 for _, a := range answers {
426 responseLength += stringLength(len(a))
427 }
428 serialized := make([]byte, responseLength)
429 p := serialized
430 p[0] = msgUserAuthInfoResponse
431 p = p[1:]
432 p = marshalUint32(p, uint32(len(answers)))
433 for _, a := range answers {
434 p = marshalString(p, []byte(a))
435 }
436
437 if err := c.writePacket(serialized); err != nil {
438 return false, nil, err
439 }
440 }
441}
442
443type retryableAuthMethod struct {
444 authMethod AuthMethod
445 maxTries int
446}
447
448func (r *retryableAuthMethod) auth(session []byte, user string, c packetConn, rand io.Reader) (ok bool, methods []string, err error) {
449 for i := 0; r.maxTries <= 0 || i < r.maxTries; i++ {
450 ok, methods, err = r.authMethod.auth(session, user, c, rand)
451 if ok || err != nil { // either success or error terminate
452 return ok, methods, err
453 }
454 }
455 return ok, methods, err
456}
457
458func (r *retryableAuthMethod) method() string {
459 return r.authMethod.method()
460}
461
462// RetryableAuthMethod is a decorator for other auth methods enabling them to
463// be retried up to maxTries times before considering that AuthMethod itself failed.
464// If maxTries is <= 0, it will retry indefinitely.
465//
466// This is useful for interactive clients using challenge/response type
467// authentication (e.g. Keyboard-Interactive, Password, etc) where the user
468// could mistype their response resulting in the server issuing a
469// SSH_MSG_USERAUTH_FAILURE (rfc4252 #8 [password] and rfc4256 #3.4
470// [keyboard-interactive]); Without this decorator, the non-retryable
471// AuthMethod would be removed from future consideration, and never tried again
472// (and so the user would never be able to retry their entry).
473func RetryableAuthMethod(auth AuthMethod, maxTries int) AuthMethod {
474 return &retryableAuthMethod{authMethod: auth, maxTries: maxTries}
475}
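
// Usage sketch (illustrative, not part of the upstream file; promptPassword
// is a hypothetical helper that reads a password from the terminal):
//
//	auth := ssh.RetryableAuthMethod(ssh.PasswordCallback(promptPassword), 3)
//
// This gives the user three attempts before the password method is
// considered to have failed.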
diff --git a/vendor/golang.org/x/crypto/ssh/common.go b/vendor/golang.org/x/crypto/ssh/common.go
new file mode 100644
index 0000000..8656d0f
--- /dev/null
+++ b/vendor/golang.org/x/crypto/ssh/common.go
@@ -0,0 +1,371 @@
1// Copyright 2011 The Go Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package ssh
6
7import (
8 "crypto"
9 "crypto/rand"
10 "fmt"
11 "io"
12 "sync"
13
14 _ "crypto/sha1"
15 _ "crypto/sha256"
16 _ "crypto/sha512"
17)
18
19// These are string constants in the SSH protocol.
20const (
21 compressionNone = "none"
22 serviceUserAuth = "ssh-userauth"
23 serviceSSH = "ssh-connection"
24)
25
26// supportedCiphers specifies the supported ciphers in preference order.
27var supportedCiphers = []string{
28 "aes128-ctr", "aes192-ctr", "aes256-ctr",
29 "aes128-gcm@openssh.com",
30 "arcfour256", "arcfour128",
31}
32
33// supportedKexAlgos specifies the supported key-exchange algorithms in
34// preference order.
35var supportedKexAlgos = []string{
36 kexAlgoCurve25519SHA256,
37 // P384 and P521 are not constant-time yet, but since we don't
38 // reuse ephemeral keys, using them for ECDH should be OK.
39 kexAlgoECDH256, kexAlgoECDH384, kexAlgoECDH521,
40 kexAlgoDH14SHA1, kexAlgoDH1SHA1,
41}
42
43// supportedHostKeyAlgos specifies the supported host-key algorithms (i.e. methods
44// of authenticating servers) in preference order.
45var supportedHostKeyAlgos = []string{
46 CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01,
47 CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoED25519v01,
48
49 KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521,
50 KeyAlgoRSA, KeyAlgoDSA,
51
52 KeyAlgoED25519,
53}
54
55// supportedMACs specifies a default set of MAC algorithms in preference order.
56// This is based on RFC 4253, section 6.4, but with hmac-md5 variants removed
57// because they have reached the end of their useful life.
58var supportedMACs = []string{
59 "hmac-sha2-256-etm@openssh.com", "hmac-sha2-256", "hmac-sha1", "hmac-sha1-96",
60}
61
62var supportedCompressions = []string{compressionNone}
63
64// hashFuncs keeps the mapping of supported algorithms to their respective
65// hashes needed for signature verification.
66var hashFuncs = map[string]crypto.Hash{
67 KeyAlgoRSA: crypto.SHA1,
68 KeyAlgoDSA: crypto.SHA1,
69 KeyAlgoECDSA256: crypto.SHA256,
70 KeyAlgoECDSA384: crypto.SHA384,
71 KeyAlgoECDSA521: crypto.SHA512,
72 CertAlgoRSAv01: crypto.SHA1,
73 CertAlgoDSAv01: crypto.SHA1,
74 CertAlgoECDSA256v01: crypto.SHA256,
75 CertAlgoECDSA384v01: crypto.SHA384,
76 CertAlgoECDSA521v01: crypto.SHA512,
77}
78
79// unexpectedMessageError results when the SSH message that we received didn't
80// match what we wanted.
81func unexpectedMessageError(expected, got uint8) error {
82 return fmt.Errorf("ssh: unexpected message type %d (expected %d)", got, expected)
83}
84
85// parseError results from a malformed SSH message.
86func parseError(tag uint8) error {
87 return fmt.Errorf("ssh: parse error in message type %d", tag)
88}
89
90func findCommon(what string, client []string, server []string) (common string, err error) {
91 for _, c := range client {
92 for _, s := range server {
93 if c == s {
94 return c, nil
95 }
96 }
97 }
98 return "", fmt.Errorf("ssh: no common algorithm for %s; client offered: %v, server offered: %v", what, client, server)
99}
100
101type directionAlgorithms struct {
102 Cipher string
103 MAC string
104 Compression string
105}
106
107// rekeyBytes returns the rekeying interval in bytes.
108func (a *directionAlgorithms) rekeyBytes() int64 {
109 // According to RFC4344 block ciphers should rekey after
110 // 2^(BLOCKSIZE/4) blocks. For all AES flavors BLOCKSIZE is
111 // 128.
112 switch a.Cipher {
113 case "aes128-ctr", "aes192-ctr", "aes256-ctr", gcmCipherID, aes128cbcID:
114 return 16 * (1 << 32)
115
116 }
117
118 // For others, stick with RFC4253 recommendation to rekey after 1 Gb of data.
119 return 1 << 30
120}
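
// Illustrative arithmetic: AES has a 16-byte block, so 2^(128/4) = 2^32
// blocks amount to 16 * 2^32 bytes = 64 GiB between rekeys, while the
// fallback of 1 << 30 bytes is 1 GiB.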
121
122type algorithms struct {
123 kex string
124 hostKey string
125 w directionAlgorithms
126 r directionAlgorithms
127}
128
129func findAgreedAlgorithms(clientKexInit, serverKexInit *kexInitMsg) (algs *algorithms, err error) {
130 result := &algorithms{}
131
132 result.kex, err = findCommon("key exchange", clientKexInit.KexAlgos, serverKexInit.KexAlgos)
133 if err != nil {
134 return
135 }
136
137 result.hostKey, err = findCommon("host key", clientKexInit.ServerHostKeyAlgos, serverKexInit.ServerHostKeyAlgos)
138 if err != nil {
139 return
140 }
141
142 result.w.Cipher, err = findCommon("client to server cipher", clientKexInit.CiphersClientServer, serverKexInit.CiphersClientServer)
143 if err != nil {
144 return
145 }
146
147 result.r.Cipher, err = findCommon("server to client cipher", clientKexInit.CiphersServerClient, serverKexInit.CiphersServerClient)
148 if err != nil {
149 return
150 }
151
152 result.w.MAC, err = findCommon("client to server MAC", clientKexInit.MACsClientServer, serverKexInit.MACsClientServer)
153 if err != nil {
154 return
155 }
156
157 result.r.MAC, err = findCommon("server to client MAC", clientKexInit.MACsServerClient, serverKexInit.MACsServerClient)
158 if err != nil {
159 return
160 }
161
162 result.w.Compression, err = findCommon("client to server compression", clientKexInit.CompressionClientServer, serverKexInit.CompressionClientServer)
163 if err != nil {
164 return
165 }
166
167 result.r.Compression, err = findCommon("server to client compression", clientKexInit.CompressionServerClient, serverKexInit.CompressionServerClient)
168 if err != nil {
169 return
170 }
171
172 return result, nil
173}
174
175// If RekeyThreshold is too small, we can't make any progress sending
176// data.
177const minRekeyThreshold uint64 = 256
178
179// Config contains configuration data common to both ServerConfig and
180// ClientConfig.
181type Config struct {
182 // Rand provides the source of entropy for cryptographic
183 // primitives. If Rand is nil, the cryptographic random reader
184 // in package crypto/rand will be used.
185 Rand io.Reader
186
187 // The maximum number of bytes sent or received after which a
188 // new key is negotiated. It must be at least 256. If
189 // unspecified, 1 gigabyte is used.
190 RekeyThreshold uint64
191
192	// The allowed key exchange algorithms. If unspecified then a
193 // default set of algorithms is used.
194 KeyExchanges []string
195
196 // The allowed cipher algorithms. If unspecified then a sensible
197 // default is used.
198 Ciphers []string
199
200 // The allowed MAC algorithms. If unspecified then a sensible default
201 // is used.
202 MACs []string
203}
204
205// SetDefaults sets sensible values for unset fields in config. This is
206// exported for testing: Configs passed to SSH functions are copied and have
207// default values set automatically.
208func (c *Config) SetDefaults() {
209 if c.Rand == nil {
210 c.Rand = rand.Reader
211 }
212 if c.Ciphers == nil {
213 c.Ciphers = supportedCiphers
214 }
215 var ciphers []string
216 for _, c := range c.Ciphers {
217 if cipherModes[c] != nil {
218			// keep the cipher only if we have a cipherModes definition
219 ciphers = append(ciphers, c)
220 }
221 }
222 c.Ciphers = ciphers
223
224 if c.KeyExchanges == nil {
225 c.KeyExchanges = supportedKexAlgos
226 }
227
228 if c.MACs == nil {
229 c.MACs = supportedMACs
230 }
231
232 if c.RekeyThreshold == 0 {
233 // RFC 4253, section 9 suggests rekeying after 1G.
234 c.RekeyThreshold = 1 << 30
235 }
236 if c.RekeyThreshold < minRekeyThreshold {
237 c.RekeyThreshold = minRekeyThreshold
238 }
239}
240
241// buildDataSignedForAuth returns the data that is signed in order to prove
242// possession of a private key. See RFC 4252, section 7.
243func buildDataSignedForAuth(sessionId []byte, req userAuthRequestMsg, algo, pubKey []byte) []byte {
244 data := struct {
245 Session []byte
246 Type byte
247 User string
248 Service string
249 Method string
250 Sign bool
251 Algo []byte
252 PubKey []byte
253 }{
254 sessionId,
255 msgUserAuthRequest,
256 req.User,
257 req.Service,
258 req.Method,
259 true,
260 algo,
261 pubKey,
262 }
263 return Marshal(data)
264}
265
266func appendU16(buf []byte, n uint16) []byte {
267 return append(buf, byte(n>>8), byte(n))
268}
269
270func appendU32(buf []byte, n uint32) []byte {
271 return append(buf, byte(n>>24), byte(n>>16), byte(n>>8), byte(n))
272}
273
274func appendU64(buf []byte, n uint64) []byte {
275 return append(buf,
276 byte(n>>56), byte(n>>48), byte(n>>40), byte(n>>32),
277 byte(n>>24), byte(n>>16), byte(n>>8), byte(n))
278}
279
280func appendInt(buf []byte, n int) []byte {
281 return appendU32(buf, uint32(n))
282}
283
284func appendString(buf []byte, s string) []byte {
285 buf = appendU32(buf, uint32(len(s)))
286 buf = append(buf, s...)
287 return buf
288}
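
// Worked example (illustrative): appendString(nil, "ssh") yields the
// RFC 4251 string encoding: a big-endian uint32 length (00 00 00 03)
// followed by the raw bytes (73 73 68).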
289
290func appendBool(buf []byte, b bool) []byte {
291 if b {
292 return append(buf, 1)
293 }
294 return append(buf, 0)
295}
296
297// newCond is a helper to hide the fact that there is no usable zero
298// value for sync.Cond.
299func newCond() *sync.Cond { return sync.NewCond(new(sync.Mutex)) }
300
301// window represents the buffer available to clients
302// wishing to write to a channel.
303type window struct {
304 *sync.Cond
305 win uint32 // RFC 4254 5.2 says the window size can grow to 2^32-1
306 writeWaiters int
307 closed bool
308}
309
310// add adds win to the amount of window available
311// for consumers.
312func (w *window) add(win uint32) bool {
313 // a zero sized window adjust is a noop.
314 if win == 0 {
315 return true
316 }
317 w.L.Lock()
318 if w.win+win < win {
319 w.L.Unlock()
320 return false
321 }
322 w.win += win
323	// It is unusual, but possible, for multiple goroutines to be waiting to
324	// reserve window space. Use broadcast to notify all waiters
325 // that additional window is available.
326 w.Broadcast()
327 w.L.Unlock()
328 return true
329}
330
331// close sets the window to closed, so all reservations fail
332// immediately.
333func (w *window) close() {
334 w.L.Lock()
335 w.closed = true
336 w.Broadcast()
337 w.L.Unlock()
338}
339
340// reserve reserves win from the available window capacity.
341// If no capacity remains, reserve will block. reserve may
342// return less than requested.
343func (w *window) reserve(win uint32) (uint32, error) {
344 var err error
345 w.L.Lock()
346 w.writeWaiters++
347 w.Broadcast()
348 for w.win == 0 && !w.closed {
349 w.Wait()
350 }
351 w.writeWaiters--
352 if w.win < win {
353 win = w.win
354 }
355 w.win -= win
356 if w.closed {
357 err = io.EOF
358 }
359 w.L.Unlock()
360 return win, err
361}
362
363// waitWriterBlocked waits until some goroutine is blocked for further
364// writes. It is used in tests only.
365func (w *window) waitWriterBlocked() {
366 w.Cond.L.Lock()
367 for w.writeWaiters == 0 {
368 w.Cond.Wait()
369 }
370 w.Cond.L.Unlock()
371}
diff --git a/vendor/golang.org/x/crypto/ssh/connection.go b/vendor/golang.org/x/crypto/ssh/connection.go
new file mode 100644
index 0000000..e786f2f
--- /dev/null
+++ b/vendor/golang.org/x/crypto/ssh/connection.go
@@ -0,0 +1,143 @@
1// Copyright 2013 The Go Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package ssh
6
7import (
8 "fmt"
9 "net"
10)
11
12// OpenChannelError is returned if the other side rejects an
13// OpenChannel request.
14type OpenChannelError struct {
15 Reason RejectionReason
16 Message string
17}
18
19func (e *OpenChannelError) Error() string {
20 return fmt.Sprintf("ssh: rejected: %s (%s)", e.Reason, e.Message)
21}
22
23// ConnMetadata holds metadata for the connection.
24type ConnMetadata interface {
25 // User returns the user ID for this connection.
26 User() string
27
28 // SessionID returns the session hash, also denoted by H.
29 SessionID() []byte
30
31 // ClientVersion returns the client's version string as hashed
32 // into the session ID.
33 ClientVersion() []byte
34
35 // ServerVersion returns the server's version string as hashed
36 // into the session ID.
37 ServerVersion() []byte
38
39 // RemoteAddr returns the remote address for this connection.
40 RemoteAddr() net.Addr
41
42 // LocalAddr returns the local address for this connection.
43 LocalAddr() net.Addr
44}
45
46// Conn represents an SSH connection for both server and client roles.
47// Conn is the basis for implementing an application layer, such
48// as ClientConn, which implements the traditional shell access for
49// clients.
50type Conn interface {
51 ConnMetadata
52
53 // SendRequest sends a global request, and returns the
54 // reply. If wantReply is true, it returns the response status
55 // and payload. See also RFC4254, section 4.
56 SendRequest(name string, wantReply bool, payload []byte) (bool, []byte, error)
57
58 // OpenChannel tries to open a channel. If the request is
59 // rejected, it returns *OpenChannelError. On success it returns
60 // the SSH Channel and a Go channel for incoming, out-of-band
61 // requests. The Go channel must be serviced, or the
62 // connection will hang.
63 OpenChannel(name string, data []byte) (Channel, <-chan *Request, error)
64
65 // Close closes the underlying network connection.
66 Close() error
67
68 // Wait blocks until the connection has shut down, and returns the
69 // error causing the shutdown.
70 Wait() error
71
72 // TODO(hanwen): consider exposing:
73 // RequestKeyChange
74 // Disconnect
75}
76
77// DiscardRequests consumes and rejects all requests from the
78// passed-in channel.
79func DiscardRequests(in <-chan *Request) {
80 for req := range in {
81 if req.WantReply {
82 req.Reply(false, nil)
83 }
84 }
85}
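
A hedged usage sketch (openSession is an illustrative helper, not part of the package): the request channel returned by OpenChannel must be serviced, so a caller that has no interest in out-of-band requests hands it to DiscardRequests in a goroutine.

package example

import "golang.org/x/crypto/ssh"

// openSession opens a "session" channel on an established ssh.Conn and
// discards its out-of-band requests so the connection cannot stall.
func openSession(conn ssh.Conn) (ssh.Channel, error) {
	ch, reqs, err := conn.OpenChannel("session", nil)
	if err != nil {
		return nil, err // may be an *ssh.OpenChannelError on rejection
	}
	go ssh.DiscardRequests(reqs)
	return ch, nil
}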
86
87// A connection represents an incoming connection.
88type connection struct {
89 transport *handshakeTransport
90 sshConn
91
92 // The connection protocol.
93 *mux
94}
95
96func (c *connection) Close() error {
97 return c.sshConn.conn.Close()
98}
99
100// sshConn provides net.Conn metadata, but disallows direct reads and
101// writes.
102type sshConn struct {
103 conn net.Conn
104
105 user string
106 sessionID []byte
107 clientVersion []byte
108 serverVersion []byte
109}
110
111func dup(src []byte) []byte {
112 dst := make([]byte, len(src))
113 copy(dst, src)
114 return dst
115}
116
117func (c *sshConn) User() string {
118 return c.user
119}
120
121func (c *sshConn) RemoteAddr() net.Addr {
122 return c.conn.RemoteAddr()
123}
124
125func (c *sshConn) Close() error {
126 return c.conn.Close()
127}
128
129func (c *sshConn) LocalAddr() net.Addr {
130 return c.conn.LocalAddr()
131}
132
133func (c *sshConn) SessionID() []byte {
134 return dup(c.sessionID)
135}
136
137func (c *sshConn) ClientVersion() []byte {
138 return dup(c.clientVersion)
139}
140
141func (c *sshConn) ServerVersion() []byte {
142 return dup(c.serverVersion)
143}
diff --git a/vendor/golang.org/x/crypto/ssh/doc.go b/vendor/golang.org/x/crypto/ssh/doc.go
new file mode 100644
index 0000000..d6be894
--- /dev/null
+++ b/vendor/golang.org/x/crypto/ssh/doc.go
@@ -0,0 +1,18 @@
1// Copyright 2011 The Go Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5/*
6Package ssh implements an SSH client and server.
7
8SSH is a transport security protocol, an authentication protocol and a
9family of application protocols. The most typical application-level
10protocol is a remote shell, and this is specifically implemented. However,
11the multiplexed nature of SSH is exposed to users who wish to support
12others.
13
14References:
15 [PROTOCOL.certkeys]: http://cvsweb.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL.certkeys?rev=HEAD
16 [SSH-PARAMETERS]: http://www.iana.org/assignments/ssh-parameters/ssh-parameters.xml#ssh-parameters-1
17*/
18package ssh // import "golang.org/x/crypto/ssh"
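
A minimal client sketch against this package's public Dial/ClientConfig API. Host, credentials, and the permissive host key callback are placeholders for illustration only; a real client must verify the server's host key.

package main

import (
	"log"
	"net"

	"golang.org/x/crypto/ssh"
)

func main() {
	config := &ssh.ClientConfig{
		User: "user",
		Auth: []ssh.AuthMethod{ssh.Password("password")},
		// WARNING: accepts any host key; for illustration only.
		HostKeyCallback: func(hostname string, remote net.Addr, key ssh.PublicKey) error {
			return nil
		},
	}

	client, err := ssh.Dial("tcp", "example.com:22", config)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	session, err := client.NewSession()
	if err != nil {
		log.Fatal(err)
	}
	defer session.Close()

	out, err := session.CombinedOutput("uname -a")
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("%s", out)
}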
diff --git a/vendor/golang.org/x/crypto/ssh/handshake.go b/vendor/golang.org/x/crypto/ssh/handshake.go
new file mode 100644
index 0000000..8de6506
--- /dev/null
+++ b/vendor/golang.org/x/crypto/ssh/handshake.go
@@ -0,0 +1,625 @@
1// Copyright 2013 The Go Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package ssh
6
7import (
8 "crypto/rand"
9 "errors"
10 "fmt"
11 "io"
12 "log"
13 "net"
14 "sync"
15)
16
17// debugHandshake, if set, prints messages sent and received. Key
18// exchange messages are printed as if DH were used, so the debug
19// messages are wrong when using ECDH.
20const debugHandshake = false
21
22// chanSize sets the amount of buffering for SSH connections. This is
23// primarily for testing: setting chanSize=0 uncovers deadlocks more
24// quickly.
25const chanSize = 16
26
27// keyingTransport is a packet-based transport that supports key
28// changes. It need not be thread-safe. It should pass through
29// msgNewKeys in both directions.
30type keyingTransport interface {
31 packetConn
32
33 // prepareKeyChange sets up a key change. The key change for a
34 // direction will be effected if a msgNewKeys message is sent
35 // or received.
36 prepareKeyChange(*algorithms, *kexResult) error
37}
38
39// handshakeTransport implements rekeying on top of a keyingTransport
40// and offers a thread-safe writePacket() interface.
41type handshakeTransport struct {
42 conn keyingTransport
43 config *Config
44
45 serverVersion []byte
46 clientVersion []byte
47
48 // hostKeys is non-empty if we are the server. In that case,
49 // it contains all host keys that can be used to sign the
50 // connection.
51 hostKeys []Signer
52
53 // hostKeyAlgorithms is non-empty if we are the client. In that case,
54 // we accept these key types from the server as host key.
55 hostKeyAlgorithms []string
56
57 // On read error, incoming is closed, and readError is set.
58 incoming chan []byte
59 readError error
60
61 mu sync.Mutex
62 writeError error
63 sentInitPacket []byte
64 sentInitMsg *kexInitMsg
65 pendingPackets [][]byte // Used when a key exchange is in progress.
66
67 // If the read loop wants to schedule a kex, it pings this
68 // channel, and the write loop will send out a kex
69 // message.
70 requestKex chan struct{}
71
72 // If the other side requests or confirms a kex, its kexInit
73 // packet is sent here for the write loop to find it.
74 startKex chan *pendingKex
75
76 // data for host key checking
77 hostKeyCallback func(hostname string, remote net.Addr, key PublicKey) error
78 dialAddress string
79 remoteAddr net.Addr
80
81 // Algorithms agreed in the last key exchange.
82 algorithms *algorithms
83
84 readPacketsLeft uint32
85 readBytesLeft int64
86
87 writePacketsLeft uint32
88 writeBytesLeft int64
89
90 // The session ID, or nil if the first kex has not completed yet.
91 sessionID []byte
92}
93
94type pendingKex struct {
95 otherInit []byte
96 done chan error
97}
98
99func newHandshakeTransport(conn keyingTransport, config *Config, clientVersion, serverVersion []byte) *handshakeTransport {
100 t := &handshakeTransport{
101 conn: conn,
102 serverVersion: serverVersion,
103 clientVersion: clientVersion,
104 incoming: make(chan []byte, chanSize),
105 requestKex: make(chan struct{}, 1),
106 startKex: make(chan *pendingKex, 1),
107
108 config: config,
109 }
110
111 // We always start with a mandatory key exchange.
112 t.requestKex <- struct{}{}
113 return t
114}
115
116func newClientTransport(conn keyingTransport, clientVersion, serverVersion []byte, config *ClientConfig, dialAddr string, addr net.Addr) *handshakeTransport {
117 t := newHandshakeTransport(conn, &config.Config, clientVersion, serverVersion)
118 t.dialAddress = dialAddr
119 t.remoteAddr = addr
120 t.hostKeyCallback = config.HostKeyCallback
121 if config.HostKeyAlgorithms != nil {
122 t.hostKeyAlgorithms = config.HostKeyAlgorithms
123 } else {
124 t.hostKeyAlgorithms = supportedHostKeyAlgos
125 }
126 go t.readLoop()
127 go t.kexLoop()
128 return t
129}
130
131func newServerTransport(conn keyingTransport, clientVersion, serverVersion []byte, config *ServerConfig) *handshakeTransport {
132 t := newHandshakeTransport(conn, &config.Config, clientVersion, serverVersion)
133 t.hostKeys = config.hostKeys
134 go t.readLoop()
135 go t.kexLoop()
136 return t
137}
138
139func (t *handshakeTransport) getSessionID() []byte {
140 return t.sessionID
141}
142
143// waitSession waits for the session to be established. This should be
144// the first thing to call after instantiating handshakeTransport.
145func (t *handshakeTransport) waitSession() error {
146 p, err := t.readPacket()
147 if err != nil {
148 return err
149 }
150 if p[0] != msgNewKeys {
151 return fmt.Errorf("ssh: first packet should be msgNewKeys")
152 }
153
154 return nil
155}
156
157func (t *handshakeTransport) id() string {
158 if len(t.hostKeys) > 0 {
159 return "server"
160 }
161 return "client"
162}
163
164func (t *handshakeTransport) printPacket(p []byte, write bool) {
165 action := "got"
166 if write {
167 action = "sent"
168 }
169
170 if p[0] == msgChannelData || p[0] == msgChannelExtendedData {
171 log.Printf("%s %s data (packet %d bytes)", t.id(), action, len(p))
172 } else {
173 msg, err := decode(p)
174 log.Printf("%s %s %T %v (%v)", t.id(), action, msg, msg, err)
175 }
176}
177
178func (t *handshakeTransport) readPacket() ([]byte, error) {
179 p, ok := <-t.incoming
180 if !ok {
181 return nil, t.readError
182 }
183 return p, nil
184}
185
186func (t *handshakeTransport) readLoop() {
187 first := true
188 for {
189 p, err := t.readOnePacket(first)
190 first = false
191 if err != nil {
192 t.readError = err
193 close(t.incoming)
194 break
195 }
196 if p[0] == msgIgnore || p[0] == msgDebug {
197 continue
198 }
199 t.incoming <- p
200 }
201
202 // Stop writers too.
203 t.recordWriteError(t.readError)
204
205 // Unblock the writer should it wait for this.
206 close(t.startKex)
207
208 // Don't close t.requestKex; it's also written to from writePacket.
209}
210
211func (t *handshakeTransport) pushPacket(p []byte) error {
212 if debugHandshake {
213 t.printPacket(p, true)
214 }
215 return t.conn.writePacket(p)
216}
217
218func (t *handshakeTransport) getWriteError() error {
219 t.mu.Lock()
220 defer t.mu.Unlock()
221 return t.writeError
222}
223
224func (t *handshakeTransport) recordWriteError(err error) {
225 t.mu.Lock()
226 defer t.mu.Unlock()
227 if t.writeError == nil && err != nil {
228 t.writeError = err
229 }
230}
231
232func (t *handshakeTransport) requestKeyExchange() {
233 select {
234 case t.requestKex <- struct{}{}:
235 default:
236 // something already requested a kex, so do nothing.
237 }
238}
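
requestKeyExchange relies on requestKex being a buffered channel of capacity 1: the non-blocking send coalesces any number of concurrent requests into at most one pending kex. A standalone sketch of the pattern:

package main

import "fmt"

func main() {
	requestKex := make(chan struct{}, 1)
	request := func() {
		select {
		case requestKex <- struct{}{}:
		default: // a request is already pending; drop this one
		}
	}
	for i := 0; i < 10; i++ {
		request() // 10 calls...
	}
	fmt.Println(len(requestKex)) // ...but only 1 queued request
}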
239
240func (t *handshakeTransport) kexLoop() {
241
242write:
243 for t.getWriteError() == nil {
244 var request *pendingKex
245 var sent bool
246
247 for request == nil || !sent {
248 var ok bool
249 select {
250 case request, ok = <-t.startKex:
251 if !ok {
252 break write
253 }
254 case <-t.requestKex:
255 break
256 }
257
258 if !sent {
259 if err := t.sendKexInit(); err != nil {
260 t.recordWriteError(err)
261 break
262 }
263 sent = true
264 }
265 }
266
267 if err := t.getWriteError(); err != nil {
268 if request != nil {
269 request.done <- err
270 }
271 break
272 }
273
274 // We're not servicing t.requestKex, but that is OK:
275 // we never block on sending to t.requestKex.
276
277 // We're not servicing t.startKex, but the remote end
278 // has just sent us a kexInitMsg, so it can't send
279 // another key change request until we close the done
280 // channel on the pendingKex request.
281
282 err := t.enterKeyExchange(request.otherInit)
283
284 t.mu.Lock()
285 t.writeError = err
286 t.sentInitPacket = nil
287 t.sentInitMsg = nil
288 t.writePacketsLeft = packetRekeyThreshold
289 if t.config.RekeyThreshold > 0 {
290 t.writeBytesLeft = int64(t.config.RekeyThreshold)
291 } else if t.algorithms != nil {
292 t.writeBytesLeft = t.algorithms.w.rekeyBytes()
293 }
294
295 // We have completed the key exchange. Since the
296 // reader is still blocked, it is safe to clear out
297 // the requestKex channel. This avoids the situation
298 // where: 1) we consumed our own request for the
299 // initial kex, and 2) the kex from the remote side
300 // caused another send on the requestKex channel.
301 clear:
302 for {
303 select {
304 case <-t.requestKex:
305 //
306 default:
307 break clear
308 }
309 }
310
311 request.done <- t.writeError
312
313 // kex finished. Push packets that we received while
314 // the kex was in progress. Don't look at t.startKex
315 // and don't increment writtenSinceKex: if we trigger
316 // another kex while we are still busy with the last
317 // one, things will become very confusing.
318 for _, p := range t.pendingPackets {
319 t.writeError = t.pushPacket(p)
320 if t.writeError != nil {
321 break
322 }
323 }
324 t.pendingPackets = t.pendingPackets[:0]
325 t.mu.Unlock()
326 }
327
328 // drain startKex channel. We don't service t.requestKex
329 // because nobody does blocking sends there.
330 go func() {
331 for init := range t.startKex {
332 init.done <- t.writeError
333 }
334 }()
335
336 // Unblock reader.
337 t.conn.Close()
338}
339
340// The protocol uses uint32 for packet counters, so we can't let them
341// reach 1<<32. We will actually read and write more packets than
342// this, though: the other side may send more packets, and after we
343// hit this limit on writing we will send a few more packets for the
344// key exchange itself.
345const packetRekeyThreshold = (1 << 31)
346
347func (t *handshakeTransport) readOnePacket(first bool) ([]byte, error) {
348 p, err := t.conn.readPacket()
349 if err != nil {
350 return nil, err
351 }
352
353 if t.readPacketsLeft > 0 {
354 t.readPacketsLeft--
355 } else {
356 t.requestKeyExchange()
357 }
358
359 if t.readBytesLeft > 0 {
360 t.readBytesLeft -= int64(len(p))
361 } else {
362 t.requestKeyExchange()
363 }
364
365 if debugHandshake {
366 t.printPacket(p, false)
367 }
368
369 if first && p[0] != msgKexInit {
370 return nil, fmt.Errorf("ssh: first packet should be msgKexInit")
371 }
372
373 if p[0] != msgKexInit {
374 return p, nil
375 }
376
377 firstKex := t.sessionID == nil
378
379 kex := pendingKex{
380 done: make(chan error, 1),
381 otherInit: p,
382 }
383 t.startKex <- &kex
384 err = <-kex.done
385
386 if debugHandshake {
387 log.Printf("%s exited key exchange (first %v), err %v", t.id(), firstKex, err)
388 }
389
390 if err != nil {
391 return nil, err
392 }
393
394 t.readPacketsLeft = packetRekeyThreshold
395 if t.config.RekeyThreshold > 0 {
396 t.readBytesLeft = int64(t.config.RekeyThreshold)
397 } else {
398 t.readBytesLeft = t.algorithms.r.rekeyBytes()
399 }
400
401 // By default, a key exchange is hidden from higher layers by
402 // translating it into msgIgnore.
403 successPacket := []byte{msgIgnore}
404 if firstKex {
405 // sendKexInit() for the first kex waits for
406 // msgNewKeys so the authentication process is
407 // guaranteed to happen over an encrypted transport.
408 successPacket = []byte{msgNewKeys}
409 }
410
411 return successPacket, nil
412}
413
414// sendKexInit sends a key change message.
415func (t *handshakeTransport) sendKexInit() error {
416 t.mu.Lock()
417 defer t.mu.Unlock()
418 if t.sentInitMsg != nil {
419 // kexInits may be sent either in response to the other side,
420 // or because our side wants to initiate a key change, so we
421 // may have already sent a kexInit. In that case, don't send a
422 // second kexInit.
423 return nil
424 }
425
426 msg := &kexInitMsg{
427 KexAlgos: t.config.KeyExchanges,
428 CiphersClientServer: t.config.Ciphers,
429 CiphersServerClient: t.config.Ciphers,
430 MACsClientServer: t.config.MACs,
431 MACsServerClient: t.config.MACs,
432 CompressionClientServer: supportedCompressions,
433 CompressionServerClient: supportedCompressions,
434 }
435 io.ReadFull(rand.Reader, msg.Cookie[:])
436
437 if len(t.hostKeys) > 0 {
438 for _, k := range t.hostKeys {
439 msg.ServerHostKeyAlgos = append(
440 msg.ServerHostKeyAlgos, k.PublicKey().Type())
441 }
442 } else {
443 msg.ServerHostKeyAlgos = t.hostKeyAlgorithms
444 }
445 packet := Marshal(msg)
446
447 // writePacket destroys the contents, so save a copy.
448 packetCopy := make([]byte, len(packet))
449 copy(packetCopy, packet)
450
451 if err := t.pushPacket(packetCopy); err != nil {
452 return err
453 }
454
455 t.sentInitMsg = msg
456 t.sentInitPacket = packet
457
458 return nil
459}
460
461func (t *handshakeTransport) writePacket(p []byte) error {
462 switch p[0] {
463 case msgKexInit:
464 return errors.New("ssh: only handshakeTransport can send kexInit")
465 case msgNewKeys:
466 return errors.New("ssh: only handshakeTransport can send newKeys")
467 }
468
469 t.mu.Lock()
470 defer t.mu.Unlock()
471 if t.writeError != nil {
472 return t.writeError
473 }
474
475 if t.sentInitMsg != nil {
476 // Copy the packet so the writer can reuse the buffer.
477 cp := make([]byte, len(p))
478 copy(cp, p)
479 t.pendingPackets = append(t.pendingPackets, cp)
480 return nil
481 }
482
483 if t.writeBytesLeft > 0 {
484 t.writeBytesLeft -= int64(len(p))
485 } else {
486 t.requestKeyExchange()
487 }
488
489 if t.writePacketsLeft > 0 {
490 t.writePacketsLeft--
491 } else {
492 t.requestKeyExchange()
493 }
494
495 if err := t.pushPacket(p); err != nil {
496 t.writeError = err
497 }
498
499 return nil
500}
501
502func (t *handshakeTransport) Close() error {
503 return t.conn.Close()
504}
505
506func (t *handshakeTransport) enterKeyExchange(otherInitPacket []byte) error {
507 if debugHandshake {
508 log.Printf("%s entered key exchange", t.id())
509 }
510
511 otherInit := &kexInitMsg{}
512 if err := Unmarshal(otherInitPacket, otherInit); err != nil {
513 return err
514 }
515
516 magics := handshakeMagics{
517 clientVersion: t.clientVersion,
518 serverVersion: t.serverVersion,
519 clientKexInit: otherInitPacket,
520 serverKexInit: t.sentInitPacket,
521 }
522
523 clientInit := otherInit
524 serverInit := t.sentInitMsg
525 if len(t.hostKeys) == 0 {
526 clientInit, serverInit = serverInit, clientInit
527
528 magics.clientKexInit = t.sentInitPacket
529 magics.serverKexInit = otherInitPacket
530 }
531
532 var err error
533 t.algorithms, err = findAgreedAlgorithms(clientInit, serverInit)
534 if err != nil {
535 return err
536 }
537
538 // We don't send FirstKexFollows, but we handle receiving it.
539 //
540 // RFC 4253 section 7 defines the kex and the agreement method for
541 // first_kex_packet_follows. It states that the guessed packet
542 // should be ignored if the "kex algorithm and/or the host
543 // key algorithm is guessed wrong (server and client have
544 // different preferred algorithm), or if any of the other
545 // algorithms cannot be agreed upon". The other algorithms have
546 // already been checked above so the kex algorithm and host key
547 // algorithm are checked here.
548 if otherInit.FirstKexFollows && (clientInit.KexAlgos[0] != serverInit.KexAlgos[0] || clientInit.ServerHostKeyAlgos[0] != serverInit.ServerHostKeyAlgos[0]) {
549 // other side sent a kex message for the wrong algorithm,
550 // which we have to ignore.
551 if _, err := t.conn.readPacket(); err != nil {
552 return err
553 }
554 }
555
556 kex, ok := kexAlgoMap[t.algorithms.kex]
557 if !ok {
558 return fmt.Errorf("ssh: unexpected key exchange algorithm %v", t.algorithms.kex)
559 }
560
561 var result *kexResult
562 if len(t.hostKeys) > 0 {
563 result, err = t.server(kex, t.algorithms, &magics)
564 } else {
565 result, err = t.client(kex, t.algorithms, &magics)
566 }
567
568 if err != nil {
569 return err
570 }
571
572 if t.sessionID == nil {
573 t.sessionID = result.H
574 }
575 result.SessionID = t.sessionID
576
577 t.conn.prepareKeyChange(t.algorithms, result)
578 if err = t.conn.writePacket([]byte{msgNewKeys}); err != nil {
579 return err
580 }
581 if packet, err := t.conn.readPacket(); err != nil {
582 return err
583 } else if packet[0] != msgNewKeys {
584 return unexpectedMessageError(msgNewKeys, packet[0])
585 }
586
587 return nil
588}
589
590func (t *handshakeTransport) server(kex kexAlgorithm, algs *algorithms, magics *handshakeMagics) (*kexResult, error) {
591 var hostKey Signer
592 for _, k := range t.hostKeys {
593 if algs.hostKey == k.PublicKey().Type() {
594 hostKey = k
595 }
596 }
597
598 r, err := kex.Server(t.conn, t.config.Rand, magics, hostKey)
599 return r, err
600}
601
602func (t *handshakeTransport) client(kex kexAlgorithm, algs *algorithms, magics *handshakeMagics) (*kexResult, error) {
603 result, err := kex.Client(t.conn, t.config.Rand, magics)
604 if err != nil {
605 return nil, err
606 }
607
608 hostKey, err := ParsePublicKey(result.HostKey)
609 if err != nil {
610 return nil, err
611 }
612
613 if err := verifyHostKeySignature(hostKey, result); err != nil {
614 return nil, err
615 }
616
617 if t.hostKeyCallback != nil {
618 err = t.hostKeyCallback(t.dialAddress, t.remoteAddr, hostKey)
619 if err != nil {
620 return nil, err
621 }
622 }
623
624 return result, nil
625}
diff --git a/vendor/golang.org/x/crypto/ssh/kex.go b/vendor/golang.org/x/crypto/ssh/kex.go
new file mode 100644
index 0000000..c87fbeb
--- /dev/null
+++ b/vendor/golang.org/x/crypto/ssh/kex.go
@@ -0,0 +1,540 @@
1// Copyright 2013 The Go Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package ssh
6
7import (
8 "crypto"
9 "crypto/ecdsa"
10 "crypto/elliptic"
11 "crypto/rand"
12 "crypto/subtle"
13 "errors"
14 "io"
15 "math/big"
16
17 "golang.org/x/crypto/curve25519"
18)
19
20const (
21 kexAlgoDH1SHA1 = "diffie-hellman-group1-sha1"
22 kexAlgoDH14SHA1 = "diffie-hellman-group14-sha1"
23 kexAlgoECDH256 = "ecdh-sha2-nistp256"
24 kexAlgoECDH384 = "ecdh-sha2-nistp384"
25 kexAlgoECDH521 = "ecdh-sha2-nistp521"
26 kexAlgoCurve25519SHA256 = "curve25519-sha256@libssh.org"
27)
28
29// kexResult captures the outcome of a key exchange.
30type kexResult struct {
31 // Session hash. See also RFC 4253, section 8.
32 H []byte
33
34 // Shared secret. See also RFC 4253, section 8.
35 K []byte
36
37 // Host key as hashed into H.
38 HostKey []byte
39
40 // Signature of H.
41 Signature []byte
42
43 // A cryptographic hash function that matches the security
44 // level of the key exchange algorithm. It is used for
45 // calculating H, and for deriving keys from H and K.
46 Hash crypto.Hash
47
48 // The session ID, which is the first H computed. This is used
49 // to derive key material inside the transport.
50 SessionID []byte
51}
52
53// handshakeMagics contains data that is always included in the
54// session hash.
55type handshakeMagics struct {
56 clientVersion, serverVersion []byte
57 clientKexInit, serverKexInit []byte
58}
59
60func (m *handshakeMagics) write(w io.Writer) {
61 writeString(w, m.clientVersion)
62 writeString(w, m.serverVersion)
63 writeString(w, m.clientKexInit)
64 writeString(w, m.serverKexInit)
65}
66
67// kexAlgorithm abstracts different key exchange algorithms.
68type kexAlgorithm interface {
69 // Server runs server-side key agreement, signing the result
70 // with a hostkey.
71 Server(p packetConn, rand io.Reader, magics *handshakeMagics, s Signer) (*kexResult, error)
72
73 // Client runs the client-side key agreement. Caller is
74 // responsible for verifying the host key signature.
75 Client(p packetConn, rand io.Reader, magics *handshakeMagics) (*kexResult, error)
76}
77
78// dhGroup is a multiplicative group suitable for implementing Diffie-Hellman key agreement.
79type dhGroup struct {
80 g, p, pMinus1 *big.Int
81}
82
83func (group *dhGroup) diffieHellman(theirPublic, myPrivate *big.Int) (*big.Int, error) {
84 if theirPublic.Cmp(bigOne) <= 0 || theirPublic.Cmp(group.pMinus1) >= 0 {
85 return nil, errors.New("ssh: DH parameter out of bounds")
86 }
87 return new(big.Int).Exp(theirPublic, myPrivate, group.p), nil
88}
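
The bounds check rejects degenerate peer values such as 0, 1, and p-1, which would force the shared secret into a tiny, attacker-predictable set. A toy illustration with deliberately insecure parameters:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	p := big.NewInt(23) // toy prime; real groups use 1024+ bit primes
	x := big.NewInt(6)  // our private exponent

	evil := big.NewInt(1) // degenerate "public value" from the peer
	k := new(big.Int).Exp(evil, x, p)
	fmt.Println(k) // 1: the "secret" is known to anyone, since 1^x = 1
}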
89
90func (group *dhGroup) Client(c packetConn, randSource io.Reader, magics *handshakeMagics) (*kexResult, error) {
91 hashFunc := crypto.SHA1
92
93 var x *big.Int
94 for {
95 var err error
96 if x, err = rand.Int(randSource, group.pMinus1); err != nil {
97 return nil, err
98 }
99 if x.Sign() > 0 {
100 break
101 }
102 }
103
104 X := new(big.Int).Exp(group.g, x, group.p)
105 kexDHInit := kexDHInitMsg{
106 X: X,
107 }
108 if err := c.writePacket(Marshal(&kexDHInit)); err != nil {
109 return nil, err
110 }
111
112 packet, err := c.readPacket()
113 if err != nil {
114 return nil, err
115 }
116
117 var kexDHReply kexDHReplyMsg
118 if err = Unmarshal(packet, &kexDHReply); err != nil {
119 return nil, err
120 }
121
122 kInt, err := group.diffieHellman(kexDHReply.Y, x)
123 if err != nil {
124 return nil, err
125 }
126
127 h := hashFunc.New()
128 magics.write(h)
129 writeString(h, kexDHReply.HostKey)
130 writeInt(h, X)
131 writeInt(h, kexDHReply.Y)
132 K := make([]byte, intLength(kInt))
133 marshalInt(K, kInt)
134 h.Write(K)
135
136 return &kexResult{
137 H: h.Sum(nil),
138 K: K,
139 HostKey: kexDHReply.HostKey,
140 Signature: kexDHReply.Signature,
141 Hash: crypto.SHA1,
142 }, nil
143}
144
145func (group *dhGroup) Server(c packetConn, randSource io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) {
146 hashFunc := crypto.SHA1
147 packet, err := c.readPacket()
148 if err != nil {
149 return
150 }
151 var kexDHInit kexDHInitMsg
152 if err = Unmarshal(packet, &kexDHInit); err != nil {
153 return
154 }
155
156 var y *big.Int
157 for {
158 if y, err = rand.Int(randSource, group.pMinus1); err != nil {
159 return
160 }
161 if y.Sign() > 0 {
162 break
163 }
164 }
165
166 Y := new(big.Int).Exp(group.g, y, group.p)
167 kInt, err := group.diffieHellman(kexDHInit.X, y)
168 if err != nil {
169 return nil, err
170 }
171
172 hostKeyBytes := priv.PublicKey().Marshal()
173
174 h := hashFunc.New()
175 magics.write(h)
176 writeString(h, hostKeyBytes)
177 writeInt(h, kexDHInit.X)
178 writeInt(h, Y)
179
180 K := make([]byte, intLength(kInt))
181 marshalInt(K, kInt)
182 h.Write(K)
183
184 H := h.Sum(nil)
185
186 // H is already a hash, but the hostkey signing will apply its
187 // own key-specific hash algorithm.
188 sig, err := signAndMarshal(priv, randSource, H)
189 if err != nil {
190 return nil, err
191 }
192
193 kexDHReply := kexDHReplyMsg{
194 HostKey: hostKeyBytes,
195 Y: Y,
196 Signature: sig,
197 }
198 packet = Marshal(&kexDHReply)
199
200 err = c.writePacket(packet)
201 return &kexResult{
202 H: H,
203 K: K,
204 HostKey: hostKeyBytes,
205 Signature: sig,
206 Hash: crypto.SHA1,
207 }, nil
208}
209
210// ecdh performs Elliptic Curve Diffie-Hellman key exchange as
211// described in RFC 5656, section 4.
212type ecdh struct {
213 curve elliptic.Curve
214}
215
216func (kex *ecdh) Client(c packetConn, rand io.Reader, magics *handshakeMagics) (*kexResult, error) {
217 ephKey, err := ecdsa.GenerateKey(kex.curve, rand)
218 if err != nil {
219 return nil, err
220 }
221
222 kexInit := kexECDHInitMsg{
223 ClientPubKey: elliptic.Marshal(kex.curve, ephKey.PublicKey.X, ephKey.PublicKey.Y),
224 }
225
226 serialized := Marshal(&kexInit)
227 if err := c.writePacket(serialized); err != nil {
228 return nil, err
229 }
230
231 packet, err := c.readPacket()
232 if err != nil {
233 return nil, err
234 }
235
236 var reply kexECDHReplyMsg
237 if err = Unmarshal(packet, &reply); err != nil {
238 return nil, err
239 }
240
241 x, y, err := unmarshalECKey(kex.curve, reply.EphemeralPubKey)
242 if err != nil {
243 return nil, err
244 }
245
246 // generate shared secret
247 secret, _ := kex.curve.ScalarMult(x, y, ephKey.D.Bytes())
248
249 h := ecHash(kex.curve).New()
250 magics.write(h)
251 writeString(h, reply.HostKey)
252 writeString(h, kexInit.ClientPubKey)
253 writeString(h, reply.EphemeralPubKey)
254 K := make([]byte, intLength(secret))
255 marshalInt(K, secret)
256 h.Write(K)
257
258 return &kexResult{
259 H: h.Sum(nil),
260 K: K,
261 HostKey: reply.HostKey,
262 Signature: reply.Signature,
263 Hash: ecHash(kex.curve),
264 }, nil
265}
266
267// unmarshalECKey parses and checks an EC key.
268func unmarshalECKey(curve elliptic.Curve, pubkey []byte) (x, y *big.Int, err error) {
269 x, y = elliptic.Unmarshal(curve, pubkey)
270 if x == nil {
271 return nil, nil, errors.New("ssh: elliptic.Unmarshal failure")
272 }
273 if !validateECPublicKey(curve, x, y) {
274 return nil, nil, errors.New("ssh: public key not on curve")
275 }
276 return x, y, nil
277}
278
279// validateECPublicKey checks that the point is a valid public key for
280// the given curve. See [SEC1], 3.2.2
281func validateECPublicKey(curve elliptic.Curve, x, y *big.Int) bool {
282 if x.Sign() == 0 && y.Sign() == 0 {
283 return false
284 }
285
286 if x.Cmp(curve.Params().P) >= 0 {
287 return false
288 }
289
290 if y.Cmp(curve.Params().P) >= 0 {
291 return false
292 }
293
294 if !curve.IsOnCurve(x, y) {
295 return false
296 }
297
298 // We don't check if N * PubKey == 0, since
299 //
300 // - the NIST curves have cofactor = 1, so this is implicit.
301 // (We don't foresee an implementation that supports non NIST
302 // curves)
303 //
304 // - for ephemeral keys, we don't need to worry about small
305 // subgroup attacks.
306 return true
307}
308
309func (kex *ecdh) Server(c packetConn, rand io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) {
310 packet, err := c.readPacket()
311 if err != nil {
312 return nil, err
313 }
314
315 var kexECDHInit kexECDHInitMsg
316 if err = Unmarshal(packet, &kexECDHInit); err != nil {
317 return nil, err
318 }
319
320 clientX, clientY, err := unmarshalECKey(kex.curve, kexECDHInit.ClientPubKey)
321 if err != nil {
322 return nil, err
323 }
324
325 // We could cache this key across multiple users/multiple
326 // connection attempts, but the benefit is small. OpenSSH
327 // generates a new key for each incoming connection.
328 ephKey, err := ecdsa.GenerateKey(kex.curve, rand)
329 if err != nil {
330 return nil, err
331 }
332
333 hostKeyBytes := priv.PublicKey().Marshal()
334
335 serializedEphKey := elliptic.Marshal(kex.curve, ephKey.PublicKey.X, ephKey.PublicKey.Y)
336
337 // generate shared secret
338 secret, _ := kex.curve.ScalarMult(clientX, clientY, ephKey.D.Bytes())
339
340 h := ecHash(kex.curve).New()
341 magics.write(h)
342 writeString(h, hostKeyBytes)
343 writeString(h, kexECDHInit.ClientPubKey)
344 writeString(h, serializedEphKey)
345
346 K := make([]byte, intLength(secret))
347 marshalInt(K, secret)
348 h.Write(K)
349
350 H := h.Sum(nil)
351
352 // H is already a hash, but the hostkey signing will apply its
353 // own key-specific hash algorithm.
354 sig, err := signAndMarshal(priv, rand, H)
355 if err != nil {
356 return nil, err
357 }
358
359 reply := kexECDHReplyMsg{
360 EphemeralPubKey: serializedEphKey,
361 HostKey: hostKeyBytes,
362 Signature: sig,
363 }
364
365 serialized := Marshal(&reply)
366 if err := c.writePacket(serialized); err != nil {
367 return nil, err
368 }
369
370 return &kexResult{
371 H: H,
372 K: K,
373 HostKey: reply.HostKey,
374 Signature: sig,
375 Hash: ecHash(kex.curve),
376 }, nil
377}
378
379var kexAlgoMap = map[string]kexAlgorithm{}
380
381func init() {
382 // This is the group called diffie-hellman-group1-sha1 in RFC
383 // 4253 and Oakley Group 2 in RFC 2409.
384 p, _ := new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF", 16)
385 kexAlgoMap[kexAlgoDH1SHA1] = &dhGroup{
386 g: new(big.Int).SetInt64(2),
387 p: p,
388 pMinus1: new(big.Int).Sub(p, bigOne),
389 }
390
391 // This is the group called diffie-hellman-group14-sha1 in RFC
392 // 4253 and Oakley Group 14 in RFC 3526.
393 p, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AACAA68FFFFFFFFFFFFFFFF", 16)
394
395 kexAlgoMap[kexAlgoDH14SHA1] = &dhGroup{
396 g: new(big.Int).SetInt64(2),
397 p: p,
398 pMinus1: new(big.Int).Sub(p, bigOne),
399 }
400
401 kexAlgoMap[kexAlgoECDH521] = &ecdh{elliptic.P521()}
402 kexAlgoMap[kexAlgoECDH384] = &ecdh{elliptic.P384()}
403 kexAlgoMap[kexAlgoECDH256] = &ecdh{elliptic.P256()}
404 kexAlgoMap[kexAlgoCurve25519SHA256] = &curve25519sha256{}
405}
406
407// curve25519sha256 implements the curve25519-sha256@libssh.org key
408// agreement protocol, as described in
409// https://git.libssh.org/projects/libssh.git/tree/doc/curve25519-sha256@libssh.org.txt
410type curve25519sha256 struct{}
411
412type curve25519KeyPair struct {
413 priv [32]byte
414 pub [32]byte
415}
416
417func (kp *curve25519KeyPair) generate(rand io.Reader) error {
418 if _, err := io.ReadFull(rand, kp.priv[:]); err != nil {
419 return err
420 }
421 curve25519.ScalarBaseMult(&kp.pub, &kp.priv)
422 return nil
423}
424
425// curve25519Zeros is just an array of 32 zero bytes so that we have something
426// convenient to compare against in order to reject curve25519 points with the
427// wrong order.
428var curve25519Zeros [32]byte
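
Scalar multiplication with a small-order peer point yields the all-zero shared secret, and comparing against curve25519Zeros in constant time is how Client and Server below reject it. A standalone sketch, assuming the all-zero public value (one such small-order point):

package main

import (
	"crypto/subtle"
	"fmt"

	"golang.org/x/crypto/curve25519"
)

func main() {
	var lowOrder, priv, secret [32]byte // lowOrder is the all-zero point
	priv[0] = 0x42                      // arbitrary private scalar

	curve25519.ScalarMult(&secret, &priv, &lowOrder)

	var zeros [32]byte
	fmt.Println(subtle.ConstantTimeCompare(secret[:], zeros[:]) == 1) // true: reject
}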
429
430func (kex *curve25519sha256) Client(c packetConn, rand io.Reader, magics *handshakeMagics) (*kexResult, error) {
431 var kp curve25519KeyPair
432 if err := kp.generate(rand); err != nil {
433 return nil, err
434 }
435 if err := c.writePacket(Marshal(&kexECDHInitMsg{kp.pub[:]})); err != nil {
436 return nil, err
437 }
438
439 packet, err := c.readPacket()
440 if err != nil {
441 return nil, err
442 }
443
444 var reply kexECDHReplyMsg
445 if err = Unmarshal(packet, &reply); err != nil {
446 return nil, err
447 }
448 if len(reply.EphemeralPubKey) != 32 {
449 return nil, errors.New("ssh: peer's curve25519 public value has wrong length")
450 }
451
452 var servPub, secret [32]byte
453 copy(servPub[:], reply.EphemeralPubKey)
454 curve25519.ScalarMult(&secret, &kp.priv, &servPub)
455 if subtle.ConstantTimeCompare(secret[:], curve25519Zeros[:]) == 1 {
456 return nil, errors.New("ssh: peer's curve25519 public value has wrong order")
457 }
458
459 h := crypto.SHA256.New()
460 magics.write(h)
461 writeString(h, reply.HostKey)
462 writeString(h, kp.pub[:])
463 writeString(h, reply.EphemeralPubKey)
464
465 kInt := new(big.Int).SetBytes(secret[:])
466 K := make([]byte, intLength(kInt))
467 marshalInt(K, kInt)
468 h.Write(K)
469
470 return &kexResult{
471 H: h.Sum(nil),
472 K: K,
473 HostKey: reply.HostKey,
474 Signature: reply.Signature,
475 Hash: crypto.SHA256,
476 }, nil
477}
478
479func (kex *curve25519sha256) Server(c packetConn, rand io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) {
480 packet, err := c.readPacket()
481 if err != nil {
482 return
483 }
484 var kexInit kexECDHInitMsg
485 if err = Unmarshal(packet, &kexInit); err != nil {
486 return
487 }
488
489 if len(kexInit.ClientPubKey) != 32 {
490 return nil, errors.New("ssh: peer's curve25519 public value has wrong length")
491 }
492
493 var kp curve25519KeyPair
494 if err := kp.generate(rand); err != nil {
495 return nil, err
496 }
497
498 var clientPub, secret [32]byte
499 copy(clientPub[:], kexInit.ClientPubKey)
500 curve25519.ScalarMult(&secret, &kp.priv, &clientPub)
501 if subtle.ConstantTimeCompare(secret[:], curve25519Zeros[:]) == 1 {
502 return nil, errors.New("ssh: peer's curve25519 public value has wrong order")
503 }
504
505 hostKeyBytes := priv.PublicKey().Marshal()
506
507 h := crypto.SHA256.New()
508 magics.write(h)
509 writeString(h, hostKeyBytes)
510 writeString(h, kexInit.ClientPubKey)
511 writeString(h, kp.pub[:])
512
513 kInt := new(big.Int).SetBytes(secret[:])
514 K := make([]byte, intLength(kInt))
515 marshalInt(K, kInt)
516 h.Write(K)
517
518 H := h.Sum(nil)
519
520 sig, err := signAndMarshal(priv, rand, H)
521 if err != nil {
522 return nil, err
523 }
524
525 reply := kexECDHReplyMsg{
526 EphemeralPubKey: kp.pub[:],
527 HostKey: hostKeyBytes,
528 Signature: sig,
529 }
530 if err := c.writePacket(Marshal(&reply)); err != nil {
531 return nil, err
532 }
533 return &kexResult{
534 H: H,
535 K: K,
536 HostKey: hostKeyBytes,
537 Signature: sig,
538 Hash: crypto.SHA256,
539 }, nil
540}
diff --git a/vendor/golang.org/x/crypto/ssh/keys.go b/vendor/golang.org/x/crypto/ssh/keys.go
new file mode 100644
index 0000000..f38de98
--- /dev/null
+++ b/vendor/golang.org/x/crypto/ssh/keys.go
@@ -0,0 +1,905 @@
1// Copyright 2012 The Go Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package ssh
6
7import (
8 "bytes"
9 "crypto"
10 "crypto/dsa"
11 "crypto/ecdsa"
12 "crypto/elliptic"
13 "crypto/md5"
14 "crypto/rsa"
15 "crypto/sha256"
16 "crypto/x509"
17 "encoding/asn1"
18 "encoding/base64"
19 "encoding/hex"
20 "encoding/pem"
21 "errors"
22 "fmt"
23 "io"
24 "math/big"
25 "strings"
26
27 "golang.org/x/crypto/ed25519"
28)
29
30// These constants represent the algorithm names for key types supported by this
31// package.
32const (
33 KeyAlgoRSA = "ssh-rsa"
34 KeyAlgoDSA = "ssh-dss"
35 KeyAlgoECDSA256 = "ecdsa-sha2-nistp256"
36 KeyAlgoECDSA384 = "ecdsa-sha2-nistp384"
37 KeyAlgoECDSA521 = "ecdsa-sha2-nistp521"
38 KeyAlgoED25519 = "ssh-ed25519"
39)
40
41// parsePubKey parses a public key of the given algorithm.
42// Use ParsePublicKey for keys with prepended algorithm.
43func parsePubKey(in []byte, algo string) (pubKey PublicKey, rest []byte, err error) {
44 switch algo {
45 case KeyAlgoRSA:
46 return parseRSA(in)
47 case KeyAlgoDSA:
48 return parseDSA(in)
49 case KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521:
50 return parseECDSA(in)
51 case KeyAlgoED25519:
52 return parseED25519(in)
53 case CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoED25519v01:
54 cert, err := parseCert(in, certToPrivAlgo(algo))
55 if err != nil {
56 return nil, nil, err
57 }
58 return cert, nil, nil
59 }
60 return nil, nil, fmt.Errorf("ssh: unknown key algorithm: %v", algo)
61}
62
63// parseAuthorizedKey parses a public key in OpenSSH authorized_keys format
64// (see sshd(8) manual page) once the options and key type fields have been
65// removed.
66func parseAuthorizedKey(in []byte) (out PublicKey, comment string, err error) {
67 in = bytes.TrimSpace(in)
68
69 i := bytes.IndexAny(in, " \t")
70 if i == -1 {
71 i = len(in)
72 }
73 base64Key := in[:i]
74
75 key := make([]byte, base64.StdEncoding.DecodedLen(len(base64Key)))
76 n, err := base64.StdEncoding.Decode(key, base64Key)
77 if err != nil {
78 return nil, "", err
79 }
80 key = key[:n]
81 out, err = ParsePublicKey(key)
82 if err != nil {
83 return nil, "", err
84 }
85 comment = string(bytes.TrimSpace(in[i:]))
86 return out, comment, nil
87}
88
89// ParseKnownHosts parses an entry in the format of the known_hosts file.
90//
91// The known_hosts format is documented in the sshd(8) manual page. This
92// function will parse a single entry from in. On successful return, marker
93// will contain the optional marker value (i.e. "cert-authority" or "revoked")
94// or else be empty, hosts will contain the hosts that this entry matches,
95// pubKey will contain the public key and comment will contain any trailing
96// comment at the end of the line. See the sshd(8) manual page for the various
97// forms that a host string can take.
98//
99// The unparsed remainder of the input will be returned in rest. This function
100// can be called repeatedly to parse multiple entries.
101//
102// If no entries were found in the input then err will be io.EOF. Otherwise a
103// non-nil err value indicates a parse error.
104func ParseKnownHosts(in []byte) (marker string, hosts []string, pubKey PublicKey, comment string, rest []byte, err error) {
105 for len(in) > 0 {
106 end := bytes.IndexByte(in, '\n')
107 if end != -1 {
108 rest = in[end+1:]
109 in = in[:end]
110 } else {
111 rest = nil
112 }
113
114 end = bytes.IndexByte(in, '\r')
115 if end != -1 {
116 in = in[:end]
117 }
118
119 in = bytes.TrimSpace(in)
120 if len(in) == 0 || in[0] == '#' {
121 in = rest
122 continue
123 }
124
125 i := bytes.IndexAny(in, " \t")
126 if i == -1 {
127 in = rest
128 continue
129 }
130
131 // Strip out the beginning of the known_hosts entry.
132 // This is either an optional marker or a (set of) hostname(s).
133 keyFields := bytes.Fields(in)
134 if len(keyFields) < 3 || len(keyFields) > 5 {
135 return "", nil, nil, "", nil, errors.New("ssh: invalid entry in known_hosts data")
136 }
137
138 // keyFields[0] is either "@cert-authority", "@revoked" or a comma separated
139 // list of hosts
140 marker := ""
141 if keyFields[0][0] == '@' {
142 marker = string(keyFields[0][1:])
143 keyFields = keyFields[1:]
144 }
145
146 hosts := string(keyFields[0])
147 // keyFields[1] contains the key type (e.g. “ssh-rsa”).
148 // However, that information is duplicated inside the
149 // base64-encoded key and so is ignored here.
150
151 key := bytes.Join(keyFields[2:], []byte(" "))
152 if pubKey, comment, err = parseAuthorizedKey(key); err != nil {
153 return "", nil, nil, "", nil, err
154 }
155
156 return marker, strings.Split(hosts, ","), pubKey, comment, rest, nil
157 }
158
159 return "", nil, nil, "", nil, io.EOF
160}
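
A usage sketch for ParseKnownHosts (listKnownHosts is an illustrative helper, not part of the package): the rest return value is fed back in to walk a known_hosts blob entry by entry until io.EOF.

package example

import (
	"io"
	"log"

	"golang.org/x/crypto/ssh"
)

// listKnownHosts logs every entry in a known_hosts file's contents.
func listKnownHosts(data []byte) {
	for {
		marker, hosts, pubKey, comment, rest, err := ssh.ParseKnownHosts(data)
		if err == io.EOF {
			return // no more entries
		}
		if err != nil {
			log.Fatal(err)
		}
		log.Printf("marker=%q hosts=%v type=%s comment=%q", marker, hosts, pubKey.Type(), comment)
		data = rest
	}
}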
161
162// ParseAuthorizedKey parses a public key from an authorized_keys
163// file used in OpenSSH according to the sshd(8) manual page.
164func ParseAuthorizedKey(in []byte) (out PublicKey, comment string, options []string, rest []byte, err error) {
165 for len(in) > 0 {
166 end := bytes.IndexByte(in, '\n')
167 if end != -1 {
168 rest = in[end+1:]
169 in = in[:end]
170 } else {
171 rest = nil
172 }
173
174 end = bytes.IndexByte(in, '\r')
175 if end != -1 {
176 in = in[:end]
177 }
178
179 in = bytes.TrimSpace(in)
180 if len(in) == 0 || in[0] == '#' {
181 in = rest
182 continue
183 }
184
185 i := bytes.IndexAny(in, " \t")
186 if i == -1 {
187 in = rest
188 continue
189 }
190
191 if out, comment, err = parseAuthorizedKey(in[i:]); err == nil {
192 return out, comment, options, rest, nil
193 }
194
195 // No key type recognised. Maybe there's an options field at
196 // the beginning.
197 var b byte
198 inQuote := false
199 var candidateOptions []string
200 optionStart := 0
201 for i, b = range in {
202 isEnd := !inQuote && (b == ' ' || b == '\t')
203 if (b == ',' && !inQuote) || isEnd {
204 if i-optionStart > 0 {
205 candidateOptions = append(candidateOptions, string(in[optionStart:i]))
206 }
207 optionStart = i + 1
208 }
209 if isEnd {
210 break
211 }
212 if b == '"' && (i == 0 || (i > 0 && in[i-1] != '\\')) {
213 inQuote = !inQuote
214 }
215 }
216 for i < len(in) && (in[i] == ' ' || in[i] == '\t') {
217 i++
218 }
219 if i == len(in) {
220 // Invalid line: unmatched quote
221 in = rest
222 continue
223 }
224
225 in = in[i:]
226 i = bytes.IndexAny(in, " \t")
227 if i == -1 {
228 in = rest
229 continue
230 }
231
232 if out, comment, err = parseAuthorizedKey(in[i:]); err == nil {
233 options = candidateOptions
234 return out, comment, options, rest, nil
235 }
236
237 in = rest
238 continue
239 }
240
241 return nil, "", nil, nil, errors.New("ssh: no key found")
242}
243
244// ParsePublicKey parses an SSH public key formatted for use in
245// the SSH wire protocol according to RFC 4253, section 6.6.
246func ParsePublicKey(in []byte) (out PublicKey, err error) {
247 algo, in, ok := parseString(in)
248 if !ok {
249 return nil, errShortRead
250 }
251 var rest []byte
252 out, rest, err = parsePubKey(in, string(algo))
253 if len(rest) > 0 {
254 return nil, errors.New("ssh: trailing junk in public key")
255 }
256
257 return out, err
258}
259
260// MarshalAuthorizedKey serializes key for inclusion in an OpenSSH
261// authorized_keys file. The return value ends with newline.
262func MarshalAuthorizedKey(key PublicKey) []byte {
263 b := &bytes.Buffer{}
264 b.WriteString(key.Type())
265 b.WriteByte(' ')
266 e := base64.NewEncoder(base64.StdEncoding, b)
267 e.Write(key.Marshal())
268 e.Close()
269 b.WriteByte('\n')
270 return b.Bytes()
271}
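
A round-trip sketch (roundTrip is an illustrative helper, not part of the package): parse an authorized_keys line and re-serialize the key. Note the output is just "type base64\n", so any options and the comment are dropped.

package example

import (
	"log"

	"golang.org/x/crypto/ssh"
)

// roundTrip re-serializes the key found on an authorized_keys line.
func roundTrip(line []byte) []byte {
	pub, comment, options, _, err := ssh.ParseAuthorizedKey(line)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("comment=%q options=%v", comment, options)
	return ssh.MarshalAuthorizedKey(pub)
}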
272
273// PublicKey is an abstraction of different types of public keys.
274type PublicKey interface {
275 // Type returns the key's type, e.g. "ssh-rsa".
276 Type() string
277
278 // Marshal returns the serialized key data in SSH wire format,
279 // with the name prefix.
280 Marshal() []byte
281
282 // Verify that sig is a signature on the given data using this
283 // key. This function will hash the data appropriately first.
284 Verify(data []byte, sig *Signature) error
285}
286
287// CryptoPublicKey, if implemented by a PublicKey,
288// returns the underlying crypto.PublicKey form of the key.
289type CryptoPublicKey interface {
290 CryptoPublicKey() crypto.PublicKey
291}
292
293// A Signer can create signatures that verify against a public key.
294type Signer interface {
295 // PublicKey returns an associated PublicKey instance.
296 PublicKey() PublicKey
297
298 // Sign returns raw signature for the given data. This method
299 // will apply the hash specified for the keytype to the data.
300 Sign(rand io.Reader, data []byte) (*Signature, error)
301}
302
303type rsaPublicKey rsa.PublicKey
304
305func (r *rsaPublicKey) Type() string {
306 return "ssh-rsa"
307}
308
309// parseRSA parses an RSA key according to RFC 4253, section 6.6.
310func parseRSA(in []byte) (out PublicKey, rest []byte, err error) {
311 var w struct {
312 E *big.Int
313 N *big.Int
314 Rest []byte `ssh:"rest"`
315 }
316 if err := Unmarshal(in, &w); err != nil {
317 return nil, nil, err
318 }
319
320 if w.E.BitLen() > 24 {
321 return nil, nil, errors.New("ssh: exponent too large")
322 }
323 e := w.E.Int64()
324 if e < 3 || e&1 == 0 {
325 return nil, nil, errors.New("ssh: incorrect exponent")
326 }
327
328 var key rsa.PublicKey
329 key.E = int(e)
330 key.N = w.N
331 return (*rsaPublicKey)(&key), w.Rest, nil
332}
333
334func (r *rsaPublicKey) Marshal() []byte {
335 e := new(big.Int).SetInt64(int64(r.E))
336 // RSA publickey struct layout should match the struct used by
337 // parseRSACert in the x/crypto/ssh/agent package.
338 wirekey := struct {
339 Name string
340 E *big.Int
341 N *big.Int
342 }{
343 KeyAlgoRSA,
344 e,
345 r.N,
346 }
347 return Marshal(&wirekey)
348}
349
350func (r *rsaPublicKey) Verify(data []byte, sig *Signature) error {
351 if sig.Format != r.Type() {
352 return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, r.Type())
353 }
354 h := crypto.SHA1.New()
355 h.Write(data)
356 digest := h.Sum(nil)
357 return rsa.VerifyPKCS1v15((*rsa.PublicKey)(r), crypto.SHA1, digest, sig.Blob)
358}
359
360func (r *rsaPublicKey) CryptoPublicKey() crypto.PublicKey {
361 return (*rsa.PublicKey)(r)
362}
363
364type dsaPublicKey dsa.PublicKey
365
366func (r *dsaPublicKey) Type() string {
367 return "ssh-dss"
368}
369
370// parseDSA parses a DSA key according to RFC 4253, section 6.6.
371func parseDSA(in []byte) (out PublicKey, rest []byte, err error) {
372 var w struct {
373 P, Q, G, Y *big.Int
374 Rest []byte `ssh:"rest"`
375 }
376 if err := Unmarshal(in, &w); err != nil {
377 return nil, nil, err
378 }
379
380 key := &dsaPublicKey{
381 Parameters: dsa.Parameters{
382 P: w.P,
383 Q: w.Q,
384 G: w.G,
385 },
386 Y: w.Y,
387 }
388 return key, w.Rest, nil
389}
390
391func (k *dsaPublicKey) Marshal() []byte {
392 // DSA publickey struct layout should match the struct used by
393 // parseDSACert in the x/crypto/ssh/agent package.
394 w := struct {
395 Name string
396 P, Q, G, Y *big.Int
397 }{
398 k.Type(),
399 k.P,
400 k.Q,
401 k.G,
402 k.Y,
403 }
404
405 return Marshal(&w)
406}
407
408func (k *dsaPublicKey) Verify(data []byte, sig *Signature) error {
409 if sig.Format != k.Type() {
410 return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type())
411 }
412 h := crypto.SHA1.New()
413 h.Write(data)
414 digest := h.Sum(nil)
415
416 // Per RFC 4253, section 6.6,
417 // The value for 'dss_signature_blob' is encoded as a string containing
418 // r, followed by s (which are 160-bit integers, without lengths or
419 // padding, unsigned, and in network byte order).
420 // For DSS purposes, sig.Blob should be exactly 40 bytes in length.
421 if len(sig.Blob) != 40 {
422 return errors.New("ssh: DSA signature parse error")
423 }
424 r := new(big.Int).SetBytes(sig.Blob[:20])
425 s := new(big.Int).SetBytes(sig.Blob[20:])
426 if dsa.Verify((*dsa.PublicKey)(k), digest, r, s) {
427 return nil
428 }
429 return errors.New("ssh: signature did not verify")
430}
431
432func (k *dsaPublicKey) CryptoPublicKey() crypto.PublicKey {
433 return (*dsa.PublicKey)(k)
434}
435
436type dsaPrivateKey struct {
437 *dsa.PrivateKey
438}
439
440func (k *dsaPrivateKey) PublicKey() PublicKey {
441 return (*dsaPublicKey)(&k.PrivateKey.PublicKey)
442}
443
444func (k *dsaPrivateKey) Sign(rand io.Reader, data []byte) (*Signature, error) {
445 h := crypto.SHA1.New()
446 h.Write(data)
447 digest := h.Sum(nil)
448 r, s, err := dsa.Sign(rand, k.PrivateKey, digest)
449 if err != nil {
450 return nil, err
451 }
452
453 sig := make([]byte, 40)
454 rb := r.Bytes()
455 sb := s.Bytes()
456
457 copy(sig[20-len(rb):20], rb)
458 copy(sig[40-len(sb):], sb)
459
460 return &Signature{
461 Format: k.PublicKey().Type(),
462 Blob: sig,
463 }, nil
464}
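
The signature blob here is exactly 40 bytes: r and s are each left-padded with zeros into a fixed 20-byte field, per RFC 4253, section 6.6. A standalone sketch of that padding with toy values:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	r := big.NewInt(0xabcd) // toy values; real r and s are ~160-bit
	s := big.NewInt(0x1234)

	sig := make([]byte, 40)
	rb, sb := r.Bytes(), s.Bytes()
	copy(sig[20-len(rb):20], rb) // r right-aligned in bytes 0..19
	copy(sig[40-len(sb):], sb)   // s right-aligned in bytes 20..39
	fmt.Printf("%x\n", sig)
}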
465
466type ecdsaPublicKey ecdsa.PublicKey
467
468func (key *ecdsaPublicKey) Type() string {
469 return "ecdsa-sha2-" + key.nistID()
470}
471
472func (key *ecdsaPublicKey) nistID() string {
473 switch key.Params().BitSize {
474 case 256:
475 return "nistp256"
476 case 384:
477 return "nistp384"
478 case 521:
479 return "nistp521"
480 }
481 panic("ssh: unsupported ecdsa key size")
482}
483
484type ed25519PublicKey ed25519.PublicKey
485
486func (key ed25519PublicKey) Type() string {
487 return KeyAlgoED25519
488}
489
490func parseED25519(in []byte) (out PublicKey, rest []byte, err error) {
491 var w struct {
492 KeyBytes []byte
493 Rest []byte `ssh:"rest"`
494 }
495
496 if err := Unmarshal(in, &w); err != nil {
497 return nil, nil, err
498 }
499
500 key := ed25519.PublicKey(w.KeyBytes)
501
502 return (ed25519PublicKey)(key), w.Rest, nil
503}
504
505func (key ed25519PublicKey) Marshal() []byte {
506 w := struct {
507 Name string
508 KeyBytes []byte
509 }{
510 KeyAlgoED25519,
511 []byte(key),
512 }
513 return Marshal(&w)
514}
515
516func (key ed25519PublicKey) Verify(b []byte, sig *Signature) error {
517 if sig.Format != key.Type() {
518 return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, key.Type())
519 }
520
521 edKey := (ed25519.PublicKey)(key)
522 if ok := ed25519.Verify(edKey, b, sig.Blob); !ok {
523 return errors.New("ssh: signature did not verify")
524 }
525
526 return nil
527}
528
529func (k ed25519PublicKey) CryptoPublicKey() crypto.PublicKey {
530 return ed25519.PublicKey(k)
531}
532
533func supportedEllipticCurve(curve elliptic.Curve) bool {
534 return curve == elliptic.P256() || curve == elliptic.P384() || curve == elliptic.P521()
535}
536
537// ecHash returns the hash to match the given elliptic curve, see RFC
538// 5656, section 6.2.1
539func ecHash(curve elliptic.Curve) crypto.Hash {
540 bitSize := curve.Params().BitSize
541 switch {
542 case bitSize <= 256:
543 return crypto.SHA256
544 case bitSize <= 384:
545 return crypto.SHA384
546 }
547 return crypto.SHA512
548}
549
550// parseECDSA parses an ECDSA key according to RFC 5656, section 3.1.
551func parseECDSA(in []byte) (out PublicKey, rest []byte, err error) {
552 var w struct {
553 Curve string
554 KeyBytes []byte
555 Rest []byte `ssh:"rest"`
556 }
557
558 if err := Unmarshal(in, &w); err != nil {
559 return nil, nil, err
560 }
561
562 key := new(ecdsa.PublicKey)
563
564 switch w.Curve {
565 case "nistp256":
566 key.Curve = elliptic.P256()
567 case "nistp384":
568 key.Curve = elliptic.P384()
569 case "nistp521":
570 key.Curve = elliptic.P521()
571 default:
572 return nil, nil, errors.New("ssh: unsupported curve")
573 }
574
575 key.X, key.Y = elliptic.Unmarshal(key.Curve, w.KeyBytes)
576 if key.X == nil || key.Y == nil {
577 return nil, nil, errors.New("ssh: invalid curve point")
578 }
579 return (*ecdsaPublicKey)(key), w.Rest, nil
580}
581
582func (key *ecdsaPublicKey) Marshal() []byte {
583 // See RFC 5656, section 3.1.
584 keyBytes := elliptic.Marshal(key.Curve, key.X, key.Y)
585 // ECDSA publickey struct layout should match the struct used by
586 // parseECDSACert in the x/crypto/ssh/agent package.
587 w := struct {
588 Name string
589 ID string
590 Key []byte
591 }{
592 key.Type(),
593 key.nistID(),
594 keyBytes,
595 }
596
597 return Marshal(&w)
598}
599
600func (key *ecdsaPublicKey) Verify(data []byte, sig *Signature) error {
601 if sig.Format != key.Type() {
602 return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, key.Type())
603 }
604
605 h := ecHash(key.Curve).New()
606 h.Write(data)
607 digest := h.Sum(nil)
608
609 // Per RFC 5656, section 3.1.2,
610 // The ecdsa_signature_blob value has the following specific encoding:
611 // mpint r
612 // mpint s
613 var ecSig struct {
614 R *big.Int
615 S *big.Int
616 }
617
618 if err := Unmarshal(sig.Blob, &ecSig); err != nil {
619 return err
620 }
621
622 if ecdsa.Verify((*ecdsa.PublicKey)(key), digest, ecSig.R, ecSig.S) {
623 return nil
624 }
625 return errors.New("ssh: signature did not verify")
626}
627
628func (k *ecdsaPublicKey) CryptoPublicKey() crypto.PublicKey {
629 return (*ecdsa.PublicKey)(k)
630}
631
632// NewSignerFromKey takes an *rsa.PrivateKey, *dsa.PrivateKey,
633// *ecdsa.PrivateKey or any other crypto.Signer and returns a corresponding
634// Signer instance. ECDSA keys must use P-256, P-384 or P-521.
635func NewSignerFromKey(key interface{}) (Signer, error) {
636 switch key := key.(type) {
637 case crypto.Signer:
638 return NewSignerFromSigner(key)
639 case *dsa.PrivateKey:
640 return &dsaPrivateKey{key}, nil
641 default:
642 return nil, fmt.Errorf("ssh: unsupported key type %T", key)
643 }
644}
645
646type wrappedSigner struct {
647 signer crypto.Signer
648 pubKey PublicKey
649}
650
651// NewSignerFromSigner takes any crypto.Signer implementation and
652// returns a corresponding Signer interface. This can be used, for
653// example, with keys kept in hardware modules.
654func NewSignerFromSigner(signer crypto.Signer) (Signer, error) {
655 pubKey, err := NewPublicKey(signer.Public())
656 if err != nil {
657 return nil, err
658 }
659
660 return &wrappedSigner{signer, pubKey}, nil
661}
662
663func (s *wrappedSigner) PublicKey() PublicKey {
664 return s.pubKey
665}
666
667func (s *wrappedSigner) Sign(rand io.Reader, data []byte) (*Signature, error) {
668 var hashFunc crypto.Hash
669
670 switch key := s.pubKey.(type) {
671 case *rsaPublicKey, *dsaPublicKey:
672 hashFunc = crypto.SHA1
673 case *ecdsaPublicKey:
674 hashFunc = ecHash(key.Curve)
675 case ed25519PublicKey:
676 default:
677 return nil, fmt.Errorf("ssh: unsupported key type %T", key)
678 }
679
680 var digest []byte
681 if hashFunc != 0 {
682 h := hashFunc.New()
683 h.Write(data)
684 digest = h.Sum(nil)
685 } else {
686 digest = data
687 }
688
689 signature, err := s.signer.Sign(rand, digest, hashFunc)
690 if err != nil {
691 return nil, err
692 }
693
694 // crypto.Signer.Sign is expected to return an ASN.1-encoded signature
695 // for ECDSA and DSA, but that's not the encoding expected by SSH, so
696 // re-encode.
697 switch s.pubKey.(type) {
698 case *ecdsaPublicKey, *dsaPublicKey:
699 type asn1Signature struct {
700 R, S *big.Int
701 }
702 asn1Sig := new(asn1Signature)
703 _, err := asn1.Unmarshal(signature, asn1Sig)
704 if err != nil {
705 return nil, err
706 }
707
708 switch s.pubKey.(type) {
709 case *ecdsaPublicKey:
710 signature = Marshal(asn1Sig)
711
712 case *dsaPublicKey:
713 signature = make([]byte, 40)
714 r := asn1Sig.R.Bytes()
715 s := asn1Sig.S.Bytes()
716 copy(signature[20-len(r):20], r)
717 copy(signature[40-len(s):40], s)
718 }
719 }
720
721 return &Signature{
722 Format: s.pubKey.Type(),
723 Blob: signature,
724 }, nil
725}
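
A usage sketch for NewSignerFromSigner (newECDSASigner is an illustrative helper, not part of the package): any crypto.Signer can be wrapped; a freshly generated P-256 key stands in here for, say, an HSM-backed key.

package example

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"log"

	"golang.org/x/crypto/ssh"
)

// newECDSASigner wraps a crypto.Signer into an ssh.Signer.
func newECDSASigner() ssh.Signer {
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		log.Fatal(err)
	}
	signer, err := ssh.NewSignerFromSigner(key)
	if err != nil {
		log.Fatal(err)
	}
	return signer
}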
726
727// NewPublicKey takes an *rsa.PublicKey, *dsa.PublicKey, *ecdsa.PublicKey,
728// or ed25519.PublicKey and returns a corresponding PublicKey instance.
729// ECDSA keys must use P-256, P-384 or P-521.
730func NewPublicKey(key interface{}) (PublicKey, error) {
731 switch key := key.(type) {
732 case *rsa.PublicKey:
733 return (*rsaPublicKey)(key), nil
734 case *ecdsa.PublicKey:
735 if !supportedEllipticCurve(key.Curve) {
736 return nil, errors.New("ssh: only P-256, P-384 and P-521 EC keys are supported")
737 }
738 return (*ecdsaPublicKey)(key), nil
739 case *dsa.PublicKey:
740 return (*dsaPublicKey)(key), nil
741 case ed25519.PublicKey:
742 return (ed25519PublicKey)(key), nil
743 default:
744 return nil, fmt.Errorf("ssh: unsupported key type %T", key)
745 }
746}
747
748// ParsePrivateKey returns a Signer from a PEM encoded private key. It supports
749// the same keys as ParseRawPrivateKey.
750func ParsePrivateKey(pemBytes []byte) (Signer, error) {
751 key, err := ParseRawPrivateKey(pemBytes)
752 if err != nil {
753 return nil, err
754 }
755
756 return NewSignerFromKey(key)
757}
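
A sketch of the usual client-side flow, assuming an unencrypted PEM key at a hypothetical path "id_rsa". The resulting Signer plugs directly into ssh.PublicKeys for client authentication.

package main

import (
	"io/ioutil"
	"log"

	"golang.org/x/crypto/ssh"
)

func main() {
	// "id_rsa" is a hypothetical path to an unencrypted PEM private key.
	pemBytes, err := ioutil.ReadFile("id_rsa")
	if err != nil {
		log.Fatal(err)
	}
	signer, err := ssh.ParsePrivateKey(pemBytes)
	if err != nil {
		log.Fatal(err)
	}
	// The Signer can now back public-key client authentication.
	_ = ssh.PublicKeys(signer)
}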
758
759// encryptedBlock tells whether a private key is
760// encrypted by examining its Proc-Type header
761// for a mention of ENCRYPTED
762// according to RFC 1421 Section 4.6.1.1.
763func encryptedBlock(block *pem.Block) bool {
764 return strings.Contains(block.Headers["Proc-Type"], "ENCRYPTED")
765}
766
767// ParseRawPrivateKey returns a private key from a PEM encoded private key. It
768// supports RSA (PKCS#1), DSA (OpenSSL), and ECDSA private keys.
769func ParseRawPrivateKey(pemBytes []byte) (interface{}, error) {
770 block, _ := pem.Decode(pemBytes)
771 if block == nil {
772 return nil, errors.New("ssh: no key found")
773 }
774
775 if encryptedBlock(block) {
776 return nil, errors.New("ssh: cannot decode encrypted private keys")
777 }
778
779 switch block.Type {
780 case "RSA PRIVATE KEY":
781 return x509.ParsePKCS1PrivateKey(block.Bytes)
782 case "EC PRIVATE KEY":
783 return x509.ParseECPrivateKey(block.Bytes)
784 case "DSA PRIVATE KEY":
785 return ParseDSAPrivateKey(block.Bytes)
786 case "OPENSSH PRIVATE KEY":
787 return parseOpenSSHPrivateKey(block.Bytes)
788 default:
789 return nil, fmt.Errorf("ssh: unsupported key type %q", block.Type)
790 }
791}
792
793// ParseDSAPrivateKey returns a DSA private key from its ASN.1 DER encoding, as
794// specified by the OpenSSL DSA man page.
795func ParseDSAPrivateKey(der []byte) (*dsa.PrivateKey, error) {
796 var k struct {
797 Version int
798 P *big.Int
799 Q *big.Int
800 G *big.Int
801 Pub *big.Int
802 Priv *big.Int
803 }
804 rest, err := asn1.Unmarshal(der, &k)
805 if err != nil {
806 return nil, errors.New("ssh: failed to parse DSA key: " + err.Error())
807 }
808 if len(rest) > 0 {
809 return nil, errors.New("ssh: garbage after DSA key")
810 }
811
812 return &dsa.PrivateKey{
813 PublicKey: dsa.PublicKey{
814 Parameters: dsa.Parameters{
815 P: k.P,
816 Q: k.Q,
817 G: k.G,
818 },
819 Y: k.Pub,
820 },
821 X: k.Priv,
822 }, nil
823}
824
825// Implemented based on the documentation at
826// https://github.com/openssh/openssh-portable/blob/master/PROTOCOL.key
827func parseOpenSSHPrivateKey(key []byte) (*ed25519.PrivateKey, error) {
828 magic := append([]byte("openssh-key-v1"), 0)
829 if !bytes.Equal(magic, key[0:len(magic)]) {
830 return nil, errors.New("ssh: invalid openssh private key format")
831 }
832 remaining := key[len(magic):]
833
834 var w struct {
835 CipherName string
836 KdfName string
837 KdfOpts string
838 NumKeys uint32
839 PubKey []byte
840 PrivKeyBlock []byte
841 }
842
843 if err := Unmarshal(remaining, &w); err != nil {
844 return nil, err
845 }
846
847 pk1 := struct {
848 Check1 uint32
849 Check2 uint32
850 Keytype string
851 Pub []byte
852 Priv []byte
853 Comment string
854 Pad []byte `ssh:"rest"`
855 }{}
856
857 if err := Unmarshal(w.PrivKeyBlock, &pk1); err != nil {
858 return nil, err
859 }
860
861 if pk1.Check1 != pk1.Check2 {
862 return nil, errors.New("ssh: checkint mismatch")
863 }
864
865 // we only handle ed25519 keys currently
866 if pk1.Keytype != KeyAlgoED25519 {
867 return nil, errors.New("ssh: unhandled key type")
868 }
869
870 for i, b := range pk1.Pad {
871 if int(b) != i+1 {
872 return nil, errors.New("ssh: padding not as expected")
873 }
874 }
875
876 if len(pk1.Priv) != ed25519.PrivateKeySize {
877 return nil, errors.New("ssh: private key unexpected length")
878 }
879
880 pk := ed25519.PrivateKey(make([]byte, ed25519.PrivateKeySize))
881 copy(pk, pk1.Priv)
882 return &pk, nil
883}
884
885// FingerprintLegacyMD5 returns the user presentation of the key's
886// fingerprint as described by RFC 4716 section 4.
887func FingerprintLegacyMD5(pubKey PublicKey) string {
888 md5sum := md5.Sum(pubKey.Marshal())
889 hexarray := make([]string, len(md5sum))
890 for i, c := range md5sum {
891 hexarray[i] = hex.EncodeToString([]byte{c})
892 }
893 return strings.Join(hexarray, ":")
894}
895
896// FingerprintSHA256 returns the user presentation of the key's
897// fingerprint as unpadded base64 encoded sha256 hash.
898// This format was introduced from OpenSSH 6.8.
899// https://www.openssh.com/txt/release-6.8
900// https://tools.ietf.org/html/rfc4648#section-3.2 (unpadded base64 encoding)
901func FingerprintSHA256(pubKey PublicKey) string {
902 sha256sum := sha256.Sum256(pubKey.Marshal())
903 hash := base64.RawStdEncoding.EncodeToString(sha256sum[:])
904 return "SHA256:" + hash
905}
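
Both fingerprint helpers operate on a PublicKey. A self-contained sketch using a freshly generated key, so no key material needs to be pasted in:

package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"fmt"
	"log"

	"golang.org/x/crypto/ssh"
)

func main() {
	priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		log.Fatal(err)
	}
	pub, err := ssh.NewPublicKey(&priv.PublicKey)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(ssh.FingerprintSHA256(pub))    // "SHA256:" + unpadded base64
	fmt.Println(ssh.FingerprintLegacyMD5(pub)) // colon-separated hex pairs
}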
diff --git a/vendor/golang.org/x/crypto/ssh/mac.go b/vendor/golang.org/x/crypto/ssh/mac.go
new file mode 100644
index 0000000..c07a062
--- /dev/null
+++ b/vendor/golang.org/x/crypto/ssh/mac.go
@@ -0,0 +1,61 @@
1// Copyright 2012 The Go Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package ssh
6
7// Message authentication support
8
9import (
10 "crypto/hmac"
11 "crypto/sha1"
12 "crypto/sha256"
13 "hash"
14)
15
16type macMode struct {
17 keySize int
18 etm bool
19 new func(key []byte) hash.Hash
20}
21
22// truncatingMAC wraps around a hash.Hash and truncates the output digest to
23// a given size.
24type truncatingMAC struct {
25 length int
26 hmac hash.Hash
27}
28
29func (t truncatingMAC) Write(data []byte) (int, error) {
30 return t.hmac.Write(data)
31}
32
33func (t truncatingMAC) Sum(in []byte) []byte {
34 out := t.hmac.Sum(in)
35 return out[:len(in)+t.length]
36}
37
38func (t truncatingMAC) Reset() {
39 t.hmac.Reset()
40}
41
42func (t truncatingMAC) Size() int {
43 return t.length
44}
45
46func (t truncatingMAC) BlockSize() int { return t.hmac.BlockSize() }
47
48var macModes = map[string]*macMode{
49 "hmac-sha2-256-etm@openssh.com": {32, true, func(key []byte) hash.Hash {
50 return hmac.New(sha256.New, key)
51 }},
52 "hmac-sha2-256": {32, false, func(key []byte) hash.Hash {
53 return hmac.New(sha256.New, key)
54 }},
55 "hmac-sha1": {20, false, func(key []byte) hash.Hash {
56 return hmac.New(sha1.New, key)
57 }},
58 "hmac-sha1-96": {20, false, func(key []byte) hash.Hash {
59 return truncatingMAC{12, hmac.New(sha1.New, key)}
60 }},
61}
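
The -96 variant computes the full MAC over the data but sends only the first 96 bits, which is exactly what truncatingMAC does above; the key is still the full 20 bytes. A standalone sketch of the same truncation:

package main

import (
	"crypto/hmac"
	"crypto/sha1"
	"encoding/hex"
	"fmt"
)

func main() {
	key := make([]byte, 20) // hmac-sha1-96 still uses a full 20-byte key
	mac := hmac.New(sha1.New, key)
	mac.Write([]byte("packet payload"))
	full := mac.Sum(nil)
	fmt.Println(hex.EncodeToString(full))      // 20-byte digest
	fmt.Println(hex.EncodeToString(full[:12])) // what hmac-sha1-96 puts on the wire
}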
diff --git a/vendor/golang.org/x/crypto/ssh/messages.go b/vendor/golang.org/x/crypto/ssh/messages.go
new file mode 100644
index 0000000..e6ecd3a
--- /dev/null
+++ b/vendor/golang.org/x/crypto/ssh/messages.go
@@ -0,0 +1,758 @@
1// Copyright 2011 The Go Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package ssh
6
7import (
8 "bytes"
9 "encoding/binary"
10 "errors"
11 "fmt"
12 "io"
13 "math/big"
14 "reflect"
15 "strconv"
16 "strings"
17)
18
19// These are SSH message type numbers. They are scattered around several
20// documents but many were taken from [SSH-PARAMETERS].
21const (
22 msgIgnore = 2
23 msgUnimplemented = 3
24 msgDebug = 4
25 msgNewKeys = 21
26
27 // Standard authentication messages
28 msgUserAuthSuccess = 52
29 msgUserAuthBanner = 53
30)
31
32// SSH messages:
33//
34// These structures mirror the wire format of the corresponding SSH messages.
35// They are marshaled using reflection with the marshal and unmarshal functions
36// in this file. The only wrinkle is that a final member of type []byte with a
37// ssh tag of "rest" receives the remainder of a packet when unmarshaling.
38
39// See RFC 4253, section 11.1.
40const msgDisconnect = 1
41
42// disconnectMsg is the message that signals a disconnect. It is also
 43// the error type returned from mux.Wait().
44type disconnectMsg struct {
45 Reason uint32 `sshtype:"1"`
46 Message string
47 Language string
48}
49
50func (d *disconnectMsg) Error() string {
51 return fmt.Sprintf("ssh: disconnect, reason %d: %s", d.Reason, d.Message)
52}
53
54// See RFC 4253, section 7.1.
55const msgKexInit = 20
56
57type kexInitMsg struct {
58 Cookie [16]byte `sshtype:"20"`
59 KexAlgos []string
60 ServerHostKeyAlgos []string
61 CiphersClientServer []string
62 CiphersServerClient []string
63 MACsClientServer []string
64 MACsServerClient []string
65 CompressionClientServer []string
66 CompressionServerClient []string
67 LanguagesClientServer []string
68 LanguagesServerClient []string
69 FirstKexFollows bool
70 Reserved uint32
71}
72
73// See RFC 4253, section 8.
74
 75// Diffie-Hellman
76const msgKexDHInit = 30
77
78type kexDHInitMsg struct {
79 X *big.Int `sshtype:"30"`
80}
81
82const msgKexECDHInit = 30
83
84type kexECDHInitMsg struct {
85 ClientPubKey []byte `sshtype:"30"`
86}
87
88const msgKexECDHReply = 31
89
90type kexECDHReplyMsg struct {
91 HostKey []byte `sshtype:"31"`
92 EphemeralPubKey []byte
93 Signature []byte
94}
95
96const msgKexDHReply = 31
97
98type kexDHReplyMsg struct {
99 HostKey []byte `sshtype:"31"`
100 Y *big.Int
101 Signature []byte
102}
103
104// See RFC 4253, section 10.
105const msgServiceRequest = 5
106
107type serviceRequestMsg struct {
108 Service string `sshtype:"5"`
109}
110
111// See RFC 4253, section 10.
112const msgServiceAccept = 6
113
114type serviceAcceptMsg struct {
115 Service string `sshtype:"6"`
116}
117
118// See RFC 4252, section 5.
119const msgUserAuthRequest = 50
120
121type userAuthRequestMsg struct {
122 User string `sshtype:"50"`
123 Service string
124 Method string
125 Payload []byte `ssh:"rest"`
126}
127
128// Used for debug printouts of packets.
129type userAuthSuccessMsg struct {
130}
131
132// See RFC 4252, section 5.1
133const msgUserAuthFailure = 51
134
135type userAuthFailureMsg struct {
136 Methods []string `sshtype:"51"`
137 PartialSuccess bool
138}
139
140// See RFC 4256, section 3.2
141const msgUserAuthInfoRequest = 60
142const msgUserAuthInfoResponse = 61
143
144type userAuthInfoRequestMsg struct {
145 User string `sshtype:"60"`
146 Instruction string
147 DeprecatedLanguage string
148 NumPrompts uint32
149 Prompts []byte `ssh:"rest"`
150}
151
152// See RFC 4254, section 5.1.
153const msgChannelOpen = 90
154
155type channelOpenMsg struct {
156 ChanType string `sshtype:"90"`
157 PeersId uint32
158 PeersWindow uint32
159 MaxPacketSize uint32
160 TypeSpecificData []byte `ssh:"rest"`
161}
162
163const msgChannelExtendedData = 95
164const msgChannelData = 94
165
 166// Used for debug printouts of packets.
167type channelDataMsg struct {
168 PeersId uint32 `sshtype:"94"`
169 Length uint32
170 Rest []byte `ssh:"rest"`
171}
172
173// See RFC 4254, section 5.1.
174const msgChannelOpenConfirm = 91
175
176type channelOpenConfirmMsg struct {
177 PeersId uint32 `sshtype:"91"`
178 MyId uint32
179 MyWindow uint32
180 MaxPacketSize uint32
181 TypeSpecificData []byte `ssh:"rest"`
182}
183
184// See RFC 4254, section 5.1.
185const msgChannelOpenFailure = 92
186
187type channelOpenFailureMsg struct {
188 PeersId uint32 `sshtype:"92"`
189 Reason RejectionReason
190 Message string
191 Language string
192}
193
194const msgChannelRequest = 98
195
196type channelRequestMsg struct {
197 PeersId uint32 `sshtype:"98"`
198 Request string
199 WantReply bool
200 RequestSpecificData []byte `ssh:"rest"`
201}
202
203// See RFC 4254, section 5.4.
204const msgChannelSuccess = 99
205
206type channelRequestSuccessMsg struct {
207 PeersId uint32 `sshtype:"99"`
208}
209
210// See RFC 4254, section 5.4.
211const msgChannelFailure = 100
212
213type channelRequestFailureMsg struct {
214 PeersId uint32 `sshtype:"100"`
215}
216
217// See RFC 4254, section 5.3
218const msgChannelClose = 97
219
220type channelCloseMsg struct {
221 PeersId uint32 `sshtype:"97"`
222}
223
224// See RFC 4254, section 5.3
225const msgChannelEOF = 96
226
227type channelEOFMsg struct {
228 PeersId uint32 `sshtype:"96"`
229}
230
231// See RFC 4254, section 4
232const msgGlobalRequest = 80
233
234type globalRequestMsg struct {
235 Type string `sshtype:"80"`
236 WantReply bool
237 Data []byte `ssh:"rest"`
238}
239
240// See RFC 4254, section 4
241const msgRequestSuccess = 81
242
243type globalRequestSuccessMsg struct {
244 Data []byte `ssh:"rest" sshtype:"81"`
245}
246
247// See RFC 4254, section 4
248const msgRequestFailure = 82
249
250type globalRequestFailureMsg struct {
251 Data []byte `ssh:"rest" sshtype:"82"`
252}
253
254// See RFC 4254, section 5.2
255const msgChannelWindowAdjust = 93
256
257type windowAdjustMsg struct {
258 PeersId uint32 `sshtype:"93"`
259 AdditionalBytes uint32
260}
261
262// See RFC 4252, section 7
263const msgUserAuthPubKeyOk = 60
264
265type userAuthPubKeyOkMsg struct {
266 Algo string `sshtype:"60"`
267 PubKey []byte
268}
269
270// typeTags returns the possible type bytes for the given reflect.Type, which
271// should be a struct. The possible values are separated by a '|' character.
272func typeTags(structType reflect.Type) (tags []byte) {
273 tagStr := structType.Field(0).Tag.Get("sshtype")
274
275 for _, tag := range strings.Split(tagStr, "|") {
276 i, err := strconv.Atoi(tag)
277 if err == nil {
278 tags = append(tags, byte(i))
279 }
280 }
281
282 return tags
283}
284
285func fieldError(t reflect.Type, field int, problem string) error {
286 if problem != "" {
287 problem = ": " + problem
288 }
289 return fmt.Errorf("ssh: unmarshal error for field %s of type %s%s", t.Field(field).Name, t.Name(), problem)
290}
291
292var errShortRead = errors.New("ssh: short read")
293
294// Unmarshal parses data in SSH wire format into a structure. The out
295// argument should be a pointer to struct. If the first member of the
296// struct has the "sshtype" tag set to a '|'-separated set of numbers
297// in decimal, the packet must start with one of those numbers. In
298// case of error, Unmarshal returns a ParseError or
299// UnexpectedMessageError.
300func Unmarshal(data []byte, out interface{}) error {
301 v := reflect.ValueOf(out).Elem()
302 structType := v.Type()
303 expectedTypes := typeTags(structType)
304
305 var expectedType byte
306 if len(expectedTypes) > 0 {
307 expectedType = expectedTypes[0]
308 }
309
310 if len(data) == 0 {
311 return parseError(expectedType)
312 }
313
314 if len(expectedTypes) > 0 {
315 goodType := false
316 for _, e := range expectedTypes {
317 if e > 0 && data[0] == e {
318 goodType = true
319 break
320 }
321 }
322 if !goodType {
323 return fmt.Errorf("ssh: unexpected message type %d (expected one of %v)", data[0], expectedTypes)
324 }
325 data = data[1:]
326 }
327
328 var ok bool
329 for i := 0; i < v.NumField(); i++ {
330 field := v.Field(i)
331 t := field.Type()
332 switch t.Kind() {
333 case reflect.Bool:
334 if len(data) < 1 {
335 return errShortRead
336 }
337 field.SetBool(data[0] != 0)
338 data = data[1:]
339 case reflect.Array:
340 if t.Elem().Kind() != reflect.Uint8 {
341 return fieldError(structType, i, "array of unsupported type")
342 }
343 if len(data) < t.Len() {
344 return errShortRead
345 }
346 for j, n := 0, t.Len(); j < n; j++ {
347 field.Index(j).Set(reflect.ValueOf(data[j]))
348 }
349 data = data[t.Len():]
350 case reflect.Uint64:
351 var u64 uint64
352 if u64, data, ok = parseUint64(data); !ok {
353 return errShortRead
354 }
355 field.SetUint(u64)
356 case reflect.Uint32:
357 var u32 uint32
358 if u32, data, ok = parseUint32(data); !ok {
359 return errShortRead
360 }
361 field.SetUint(uint64(u32))
362 case reflect.Uint8:
363 if len(data) < 1 {
364 return errShortRead
365 }
366 field.SetUint(uint64(data[0]))
367 data = data[1:]
368 case reflect.String:
369 var s []byte
370 if s, data, ok = parseString(data); !ok {
371 return fieldError(structType, i, "")
372 }
373 field.SetString(string(s))
374 case reflect.Slice:
375 switch t.Elem().Kind() {
376 case reflect.Uint8:
377 if structType.Field(i).Tag.Get("ssh") == "rest" {
378 field.Set(reflect.ValueOf(data))
379 data = nil
380 } else {
381 var s []byte
382 if s, data, ok = parseString(data); !ok {
383 return errShortRead
384 }
385 field.Set(reflect.ValueOf(s))
386 }
387 case reflect.String:
388 var nl []string
389 if nl, data, ok = parseNameList(data); !ok {
390 return errShortRead
391 }
392 field.Set(reflect.ValueOf(nl))
393 default:
394 return fieldError(structType, i, "slice of unsupported type")
395 }
396 case reflect.Ptr:
397 if t == bigIntType {
398 var n *big.Int
399 if n, data, ok = parseInt(data); !ok {
400 return errShortRead
401 }
402 field.Set(reflect.ValueOf(n))
403 } else {
404 return fieldError(structType, i, "pointer to unsupported type")
405 }
406 default:
407 return fieldError(structType, i, fmt.Sprintf("unsupported type: %v", t))
408 }
409 }
410
411 if len(data) != 0 {
412 return parseError(expectedType)
413 }
414
415 return nil
416}
417
418// Marshal serializes the message in msg to SSH wire format. The msg
419// argument should be a struct or pointer to struct. If the first
420// member has the "sshtype" tag set to a number in decimal, that
 421// number is prepended to the result. If the last member has the
422// "ssh" tag set to "rest", its contents are appended to the output.
423func Marshal(msg interface{}) []byte {
424 out := make([]byte, 0, 64)
425 return marshalStruct(out, msg)
426}
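
A round-trip sketch with a hypothetical message type 42, exercising both the sshtype prefix and the "rest" tail described above:

package main

import (
	"fmt"
	"log"

	"golang.org/x/crypto/ssh"
)

// A hypothetical message with wire type 42. The first field carries the
// sshtype tag, and the trailing []byte tagged "rest" absorbs the remainder
// of the packet on unmarshal.
type demoMsg struct {
	Name string `sshtype:"42"`
	Seq  uint32
	Rest []byte `ssh:"rest"`
}

func main() {
	in := demoMsg{Name: "example", Seq: 7, Rest: []byte{1, 2, 3}}
	packet := ssh.Marshal(&in) // packet[0] == 42

	var out demoMsg
	if err := ssh.Unmarshal(packet, &out); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", out)
}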
427
428func marshalStruct(out []byte, msg interface{}) []byte {
429 v := reflect.Indirect(reflect.ValueOf(msg))
430 msgTypes := typeTags(v.Type())
431 if len(msgTypes) > 0 {
432 out = append(out, msgTypes[0])
433 }
434
435 for i, n := 0, v.NumField(); i < n; i++ {
436 field := v.Field(i)
437 switch t := field.Type(); t.Kind() {
438 case reflect.Bool:
439 var v uint8
440 if field.Bool() {
441 v = 1
442 }
443 out = append(out, v)
444 case reflect.Array:
445 if t.Elem().Kind() != reflect.Uint8 {
446 panic(fmt.Sprintf("array of non-uint8 in field %d: %T", i, field.Interface()))
447 }
448 for j, l := 0, t.Len(); j < l; j++ {
449 out = append(out, uint8(field.Index(j).Uint()))
450 }
451 case reflect.Uint32:
452 out = appendU32(out, uint32(field.Uint()))
453 case reflect.Uint64:
454 out = appendU64(out, uint64(field.Uint()))
455 case reflect.Uint8:
456 out = append(out, uint8(field.Uint()))
457 case reflect.String:
458 s := field.String()
459 out = appendInt(out, len(s))
460 out = append(out, s...)
461 case reflect.Slice:
462 switch t.Elem().Kind() {
463 case reflect.Uint8:
464 if v.Type().Field(i).Tag.Get("ssh") != "rest" {
465 out = appendInt(out, field.Len())
466 }
467 out = append(out, field.Bytes()...)
468 case reflect.String:
469 offset := len(out)
470 out = appendU32(out, 0)
471 if n := field.Len(); n > 0 {
472 for j := 0; j < n; j++ {
473 f := field.Index(j)
474 if j != 0 {
475 out = append(out, ',')
476 }
477 out = append(out, f.String()...)
478 }
479 // overwrite length value
480 binary.BigEndian.PutUint32(out[offset:], uint32(len(out)-offset-4))
481 }
482 default:
483 panic(fmt.Sprintf("slice of unknown type in field %d: %T", i, field.Interface()))
484 }
485 case reflect.Ptr:
486 if t == bigIntType {
487 var n *big.Int
488 nValue := reflect.ValueOf(&n)
489 nValue.Elem().Set(field)
490 needed := intLength(n)
491 oldLength := len(out)
492
493 if cap(out)-len(out) < needed {
494 newOut := make([]byte, len(out), 2*(len(out)+needed))
495 copy(newOut, out)
496 out = newOut
497 }
498 out = out[:oldLength+needed]
499 marshalInt(out[oldLength:], n)
500 } else {
501 panic(fmt.Sprintf("pointer to unknown type in field %d: %T", i, field.Interface()))
502 }
503 }
504 }
505
506 return out
507}
508
509var bigOne = big.NewInt(1)
510
511func parseString(in []byte) (out, rest []byte, ok bool) {
512 if len(in) < 4 {
513 return
514 }
515 length := binary.BigEndian.Uint32(in)
516 in = in[4:]
517 if uint32(len(in)) < length {
518 return
519 }
520 out = in[:length]
521 rest = in[length:]
522 ok = true
523 return
524}
525
526var (
527 comma = []byte{','}
528 emptyNameList = []string{}
529)
530
531func parseNameList(in []byte) (out []string, rest []byte, ok bool) {
532 contents, rest, ok := parseString(in)
533 if !ok {
534 return
535 }
536 if len(contents) == 0 {
537 out = emptyNameList
538 return
539 }
540 parts := bytes.Split(contents, comma)
541 out = make([]string, len(parts))
542 for i, part := range parts {
543 out[i] = string(part)
544 }
545 return
546}
547
548func parseInt(in []byte) (out *big.Int, rest []byte, ok bool) {
549 contents, rest, ok := parseString(in)
550 if !ok {
551 return
552 }
553 out = new(big.Int)
554
555 if len(contents) > 0 && contents[0]&0x80 == 0x80 {
556 // This is a negative number
557 notBytes := make([]byte, len(contents))
558 for i := range notBytes {
559 notBytes[i] = ^contents[i]
560 }
561 out.SetBytes(notBytes)
562 out.Add(out, bigOne)
563 out.Neg(out)
564 } else {
565 // Positive number
566 out.SetBytes(contents)
567 }
568 ok = true
569 return
570}
571
572func parseUint32(in []byte) (uint32, []byte, bool) {
573 if len(in) < 4 {
574 return 0, nil, false
575 }
576 return binary.BigEndian.Uint32(in), in[4:], true
577}
578
579func parseUint64(in []byte) (uint64, []byte, bool) {
580 if len(in) < 8 {
581 return 0, nil, false
582 }
583 return binary.BigEndian.Uint64(in), in[8:], true
584}
585
586func intLength(n *big.Int) int {
587 length := 4 /* length bytes */
588 if n.Sign() < 0 {
589 nMinus1 := new(big.Int).Neg(n)
590 nMinus1.Sub(nMinus1, bigOne)
591 bitLen := nMinus1.BitLen()
592 if bitLen%8 == 0 {
593 // The number will need 0xff padding
594 length++
595 }
596 length += (bitLen + 7) / 8
597 } else if n.Sign() == 0 {
598 // A zero is the zero length string
599 } else {
600 bitLen := n.BitLen()
601 if bitLen%8 == 0 {
602 // The number will need 0x00 padding
603 length++
604 }
605 length += (bitLen + 7) / 8
606 }
607
608 return length
609}
610
611func marshalUint32(to []byte, n uint32) []byte {
612 binary.BigEndian.PutUint32(to, n)
613 return to[4:]
614}
615
616func marshalUint64(to []byte, n uint64) []byte {
617 binary.BigEndian.PutUint64(to, n)
618 return to[8:]
619}
620
621func marshalInt(to []byte, n *big.Int) []byte {
622 lengthBytes := to
623 to = to[4:]
624 length := 0
625
626 if n.Sign() < 0 {
627 // A negative number has to be converted to two's-complement
628 // form. So we'll subtract 1 and invert. If the
629 // most-significant-bit isn't set then we'll need to pad the
630 // beginning with 0xff in order to keep the number negative.
631 nMinus1 := new(big.Int).Neg(n)
632 nMinus1.Sub(nMinus1, bigOne)
633 bytes := nMinus1.Bytes()
634 for i := range bytes {
635 bytes[i] ^= 0xff
636 }
637 if len(bytes) == 0 || bytes[0]&0x80 == 0 {
638 to[0] = 0xff
639 to = to[1:]
640 length++
641 }
642 nBytes := copy(to, bytes)
643 to = to[nBytes:]
644 length += nBytes
645 } else if n.Sign() == 0 {
646 // A zero is the zero length string
647 } else {
648 bytes := n.Bytes()
649 if len(bytes) > 0 && bytes[0]&0x80 != 0 {
650 // We'll have to pad this with a 0x00 in order to
651 // stop it looking like a negative number.
652 to[0] = 0
653 to = to[1:]
654 length++
655 }
656 nBytes := copy(to, bytes)
657 to = to[nBytes:]
658 length += nBytes
659 }
660
661 lengthBytes[0] = byte(length >> 24)
662 lengthBytes[1] = byte(length >> 16)
663 lengthBytes[2] = byte(length >> 8)
664 lengthBytes[3] = byte(length)
665 return to
666}
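
marshalInt and intLength together implement the RFC 4251 mpint rules: a length-prefixed, minimal, big-endian two's-complement encoding, with a 0x00 pad byte when a positive value's top bit is set. A sketch showing the pad via a *big.Int struct field:

package main

import (
	"encoding/hex"
	"fmt"
	"math/big"

	"golang.org/x/crypto/ssh"
)

type mpintMsg struct {
	N *big.Int
}

func main() {
	// 0x80 has its top bit set, so the mpint form gains a 0x00 pad byte
	// to keep the value positive (RFC 4251, section 5).
	m := mpintMsg{N: big.NewInt(0x80)}
	fmt.Println(hex.EncodeToString(ssh.Marshal(&m))) // 000000020080: length 2, then 00 80
}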
667
668func writeInt(w io.Writer, n *big.Int) {
669 length := intLength(n)
670 buf := make([]byte, length)
671 marshalInt(buf, n)
672 w.Write(buf)
673}
674
675func writeString(w io.Writer, s []byte) {
676 var lengthBytes [4]byte
677 lengthBytes[0] = byte(len(s) >> 24)
678 lengthBytes[1] = byte(len(s) >> 16)
679 lengthBytes[2] = byte(len(s) >> 8)
680 lengthBytes[3] = byte(len(s))
681 w.Write(lengthBytes[:])
682 w.Write(s)
683}
684
685func stringLength(n int) int {
686 return 4 + n
687}
688
689func marshalString(to []byte, s []byte) []byte {
690 to[0] = byte(len(s) >> 24)
691 to[1] = byte(len(s) >> 16)
692 to[2] = byte(len(s) >> 8)
693 to[3] = byte(len(s))
694 to = to[4:]
695 copy(to, s)
696 return to[len(s):]
697}
698
699var bigIntType = reflect.TypeOf((*big.Int)(nil))
700
701// Decode a packet into its corresponding message.
702func decode(packet []byte) (interface{}, error) {
703 var msg interface{}
704 switch packet[0] {
705 case msgDisconnect:
706 msg = new(disconnectMsg)
707 case msgServiceRequest:
708 msg = new(serviceRequestMsg)
709 case msgServiceAccept:
710 msg = new(serviceAcceptMsg)
711 case msgKexInit:
712 msg = new(kexInitMsg)
713 case msgKexDHInit:
714 msg = new(kexDHInitMsg)
715 case msgKexDHReply:
716 msg = new(kexDHReplyMsg)
717 case msgUserAuthRequest:
718 msg = new(userAuthRequestMsg)
719 case msgUserAuthSuccess:
720 return new(userAuthSuccessMsg), nil
721 case msgUserAuthFailure:
722 msg = new(userAuthFailureMsg)
723 case msgUserAuthPubKeyOk:
724 msg = new(userAuthPubKeyOkMsg)
725 case msgGlobalRequest:
726 msg = new(globalRequestMsg)
727 case msgRequestSuccess:
728 msg = new(globalRequestSuccessMsg)
729 case msgRequestFailure:
730 msg = new(globalRequestFailureMsg)
731 case msgChannelOpen:
732 msg = new(channelOpenMsg)
733 case msgChannelData:
734 msg = new(channelDataMsg)
735 case msgChannelOpenConfirm:
736 msg = new(channelOpenConfirmMsg)
737 case msgChannelOpenFailure:
738 msg = new(channelOpenFailureMsg)
739 case msgChannelWindowAdjust:
740 msg = new(windowAdjustMsg)
741 case msgChannelEOF:
742 msg = new(channelEOFMsg)
743 case msgChannelClose:
744 msg = new(channelCloseMsg)
745 case msgChannelRequest:
746 msg = new(channelRequestMsg)
747 case msgChannelSuccess:
748 msg = new(channelRequestSuccessMsg)
749 case msgChannelFailure:
750 msg = new(channelRequestFailureMsg)
751 default:
752 return nil, unexpectedMessageError(0, packet[0])
753 }
754 if err := Unmarshal(packet, msg); err != nil {
755 return nil, err
756 }
757 return msg, nil
758}
diff --git a/vendor/golang.org/x/crypto/ssh/mux.go b/vendor/golang.org/x/crypto/ssh/mux.go
new file mode 100644
index 0000000..27a527c
--- /dev/null
+++ b/vendor/golang.org/x/crypto/ssh/mux.go
@@ -0,0 +1,330 @@
1// Copyright 2013 The Go Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package ssh
6
7import (
8 "encoding/binary"
9 "fmt"
10 "io"
11 "log"
12 "sync"
13 "sync/atomic"
14)
15
16// debugMux, if set, causes messages in the connection protocol to be
17// logged.
18const debugMux = false
19
 20// chanList is a thread-safe channel list.
21type chanList struct {
22 // protects concurrent access to chans
23 sync.Mutex
24
25 // chans are indexed by the local id of the channel, which the
26 // other side should send in the PeersId field.
27 chans []*channel
28
29 // This is a debugging aid: it offsets all IDs by this
30 // amount. This helps distinguish otherwise identical
31 // server/client muxes
32 offset uint32
33}
34
35// Assigns a channel ID to the given channel.
36func (c *chanList) add(ch *channel) uint32 {
37 c.Lock()
38 defer c.Unlock()
39 for i := range c.chans {
40 if c.chans[i] == nil {
41 c.chans[i] = ch
42 return uint32(i) + c.offset
43 }
44 }
45 c.chans = append(c.chans, ch)
46 return uint32(len(c.chans)-1) + c.offset
47}
48
49// getChan returns the channel for the given ID.
50func (c *chanList) getChan(id uint32) *channel {
51 id -= c.offset
52
53 c.Lock()
54 defer c.Unlock()
55 if id < uint32(len(c.chans)) {
56 return c.chans[id]
57 }
58 return nil
59}
60
61func (c *chanList) remove(id uint32) {
62 id -= c.offset
63 c.Lock()
64 if id < uint32(len(c.chans)) {
65 c.chans[id] = nil
66 }
67 c.Unlock()
68}
69
70// dropAll forgets all channels it knows, returning them in a slice.
71func (c *chanList) dropAll() []*channel {
72 c.Lock()
73 defer c.Unlock()
74 var r []*channel
75
76 for _, ch := range c.chans {
77 if ch == nil {
78 continue
79 }
80 r = append(r, ch)
81 }
82 c.chans = nil
83 return r
84}
85
86// mux represents the state for the SSH connection protocol, which
87// multiplexes many channels onto a single packet transport.
88type mux struct {
89 conn packetConn
90 chanList chanList
91
92 incomingChannels chan NewChannel
93
94 globalSentMu sync.Mutex
95 globalResponses chan interface{}
96 incomingRequests chan *Request
97
98 errCond *sync.Cond
99 err error
100}
101
102// When debugging, each new chanList instantiation has a different
103// offset.
104var globalOff uint32
105
106func (m *mux) Wait() error {
107 m.errCond.L.Lock()
108 defer m.errCond.L.Unlock()
109 for m.err == nil {
110 m.errCond.Wait()
111 }
112 return m.err
113}
114
115// newMux returns a mux that runs over the given connection.
116func newMux(p packetConn) *mux {
117 m := &mux{
118 conn: p,
119 incomingChannels: make(chan NewChannel, chanSize),
120 globalResponses: make(chan interface{}, 1),
121 incomingRequests: make(chan *Request, chanSize),
122 errCond: newCond(),
123 }
124 if debugMux {
125 m.chanList.offset = atomic.AddUint32(&globalOff, 1)
126 }
127
128 go m.loop()
129 return m
130}
131
132func (m *mux) sendMessage(msg interface{}) error {
133 p := Marshal(msg)
134 if debugMux {
135 log.Printf("send global(%d): %#v", m.chanList.offset, msg)
136 }
137 return m.conn.writePacket(p)
138}
139
140func (m *mux) SendRequest(name string, wantReply bool, payload []byte) (bool, []byte, error) {
141 if wantReply {
142 m.globalSentMu.Lock()
143 defer m.globalSentMu.Unlock()
144 }
145
146 if err := m.sendMessage(globalRequestMsg{
147 Type: name,
148 WantReply: wantReply,
149 Data: payload,
150 }); err != nil {
151 return false, nil, err
152 }
153
154 if !wantReply {
155 return false, nil, nil
156 }
157
158 msg, ok := <-m.globalResponses
159 if !ok {
160 return false, nil, io.EOF
161 }
162 switch msg := msg.(type) {
163 case *globalRequestFailureMsg:
164 return false, msg.Data, nil
165 case *globalRequestSuccessMsg:
166 return true, msg.Data, nil
167 default:
168 return false, nil, fmt.Errorf("ssh: unexpected response to request: %#v", msg)
169 }
170}
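
This method backs the exported Conn.SendRequest. A common use is an OpenSSH-style keepalive, sketched here as a fragment assuming client is an established *ssh.Client:

// Many servers reply to a keepalive with failure; receiving any reply
// at all is what proves the connection is still alive.
_, _, err := client.SendRequest("keepalive@openssh.com", true, nil)
if err != nil {
	log.Fatal(err) // transport is dead
}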
171
172// ackRequest must be called after processing a global request that
173// has WantReply set.
174func (m *mux) ackRequest(ok bool, data []byte) error {
175 if ok {
176 return m.sendMessage(globalRequestSuccessMsg{Data: data})
177 }
178 return m.sendMessage(globalRequestFailureMsg{Data: data})
179}
180
181func (m *mux) Close() error {
182 return m.conn.Close()
183}
184
185// loop runs the connection machine. It will process packets until an
186// error is encountered. To synchronize on loop exit, use mux.Wait.
187func (m *mux) loop() {
188 var err error
189 for err == nil {
190 err = m.onePacket()
191 }
192
193 for _, ch := range m.chanList.dropAll() {
194 ch.close()
195 }
196
197 close(m.incomingChannels)
198 close(m.incomingRequests)
199 close(m.globalResponses)
200
201 m.conn.Close()
202
203 m.errCond.L.Lock()
204 m.err = err
205 m.errCond.Broadcast()
206 m.errCond.L.Unlock()
207
208 if debugMux {
209 log.Println("loop exit", err)
210 }
211}
212
213// onePacket reads and processes one packet.
214func (m *mux) onePacket() error {
215 packet, err := m.conn.readPacket()
216 if err != nil {
217 return err
218 }
219
220 if debugMux {
221 if packet[0] == msgChannelData || packet[0] == msgChannelExtendedData {
222 log.Printf("decoding(%d): data packet - %d bytes", m.chanList.offset, len(packet))
223 } else {
224 p, _ := decode(packet)
225 log.Printf("decoding(%d): %d %#v - %d bytes", m.chanList.offset, packet[0], p, len(packet))
226 }
227 }
228
229 switch packet[0] {
230 case msgChannelOpen:
231 return m.handleChannelOpen(packet)
232 case msgGlobalRequest, msgRequestSuccess, msgRequestFailure:
233 return m.handleGlobalPacket(packet)
234 }
235
236 // assume a channel packet.
237 if len(packet) < 5 {
238 return parseError(packet[0])
239 }
240 id := binary.BigEndian.Uint32(packet[1:])
241 ch := m.chanList.getChan(id)
242 if ch == nil {
243 return fmt.Errorf("ssh: invalid channel %d", id)
244 }
245
246 return ch.handlePacket(packet)
247}
248
249func (m *mux) handleGlobalPacket(packet []byte) error {
250 msg, err := decode(packet)
251 if err != nil {
252 return err
253 }
254
255 switch msg := msg.(type) {
256 case *globalRequestMsg:
257 m.incomingRequests <- &Request{
258 Type: msg.Type,
259 WantReply: msg.WantReply,
260 Payload: msg.Data,
261 mux: m,
262 }
263 case *globalRequestSuccessMsg, *globalRequestFailureMsg:
264 m.globalResponses <- msg
265 default:
266 panic(fmt.Sprintf("not a global message %#v", msg))
267 }
268
269 return nil
270}
271
272// handleChannelOpen schedules a channel to be Accept()ed.
273func (m *mux) handleChannelOpen(packet []byte) error {
274 var msg channelOpenMsg
275 if err := Unmarshal(packet, &msg); err != nil {
276 return err
277 }
278
279 if msg.MaxPacketSize < minPacketLength || msg.MaxPacketSize > 1<<31 {
280 failMsg := channelOpenFailureMsg{
281 PeersId: msg.PeersId,
282 Reason: ConnectionFailed,
283 Message: "invalid request",
284 Language: "en_US.UTF-8",
285 }
286 return m.sendMessage(failMsg)
287 }
288
289 c := m.newChannel(msg.ChanType, channelInbound, msg.TypeSpecificData)
290 c.remoteId = msg.PeersId
291 c.maxRemotePayload = msg.MaxPacketSize
292 c.remoteWin.add(msg.PeersWindow)
293 m.incomingChannels <- c
294 return nil
295}
296
297func (m *mux) OpenChannel(chanType string, extra []byte) (Channel, <-chan *Request, error) {
298 ch, err := m.openChannel(chanType, extra)
299 if err != nil {
300 return nil, nil, err
301 }
302
303 return ch, ch.incomingRequests, nil
304}
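
The same method is exposed through the ssh.Conn interface. A fragment, assuming conn is an established ssh.Conn and using a hypothetical channel type:

ch, reqs, err := conn.OpenChannel("example@example.com", nil)
if err != nil {
	log.Fatal(err) // the peer rejected the open; see OpenChannelError
}
go ssh.DiscardRequests(reqs) // the request channel must be serviced
defer ch.Close()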
305
306func (m *mux) openChannel(chanType string, extra []byte) (*channel, error) {
307 ch := m.newChannel(chanType, channelOutbound, extra)
308
309 ch.maxIncomingPayload = channelMaxPacket
310
311 open := channelOpenMsg{
312 ChanType: chanType,
313 PeersWindow: ch.myWindow,
314 MaxPacketSize: ch.maxIncomingPayload,
315 TypeSpecificData: extra,
316 PeersId: ch.localId,
317 }
318 if err := m.sendMessage(open); err != nil {
319 return nil, err
320 }
321
322 switch msg := (<-ch.msg).(type) {
323 case *channelOpenConfirmMsg:
324 return ch, nil
325 case *channelOpenFailureMsg:
326 return nil, &OpenChannelError{msg.Reason, msg.Message}
327 default:
328 return nil, fmt.Errorf("ssh: unexpected packet in response to channel open: %T", msg)
329 }
330}
diff --git a/vendor/golang.org/x/crypto/ssh/server.go b/vendor/golang.org/x/crypto/ssh/server.go
new file mode 100644
index 0000000..77c84d1
--- /dev/null
+++ b/vendor/golang.org/x/crypto/ssh/server.go
@@ -0,0 +1,491 @@
1// Copyright 2011 The Go Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package ssh
6
7import (
8 "bytes"
9 "errors"
10 "fmt"
11 "io"
12 "net"
13 "strings"
14)
15
16// The Permissions type holds fine-grained permissions that are
17// specific to a user or a specific authentication method for a
18// user. Permissions, except for "source-address", must be enforced in
19// the server application layer, after successful authentication. The
20// Permissions are passed on in ServerConn so a server implementation
21// can honor them.
22type Permissions struct {
23 // Critical options restrict default permissions. Common
24 // restrictions are "source-address" and "force-command". If
25 // the server cannot enforce the restriction, or does not
26 // recognize it, the user should not authenticate.
27 CriticalOptions map[string]string
28
29 // Extensions are extra functionality that the server may
30 // offer on authenticated connections. Common extensions are
31 // "permit-agent-forwarding", "permit-X11-forwarding". Lack of
32 // support for an extension does not preclude authenticating a
33 // user.
34 Extensions map[string]string
35}
36
37// ServerConfig holds server specific configuration data.
38type ServerConfig struct {
39 // Config contains configuration shared between client and server.
40 Config
41
42 hostKeys []Signer
43
44 // NoClientAuth is true if clients are allowed to connect without
45 // authenticating.
46 NoClientAuth bool
47
48 // PasswordCallback, if non-nil, is called when a user
49 // attempts to authenticate using a password.
50 PasswordCallback func(conn ConnMetadata, password []byte) (*Permissions, error)
51
52 // PublicKeyCallback, if non-nil, is called when a client attempts public
53 // key authentication. It must return true if the given public key is
54 // valid for the given user. For example, see CertChecker.Authenticate.
55 PublicKeyCallback func(conn ConnMetadata, key PublicKey) (*Permissions, error)
56
57 // KeyboardInteractiveCallback, if non-nil, is called when
58 // keyboard-interactive authentication is selected (RFC
59 // 4256). The client object's Challenge function should be
60 // used to query the user. The callback may offer multiple
61 // Challenge rounds. To avoid information leaks, the client
62 // should be presented a challenge even if the user is
63 // unknown.
64 KeyboardInteractiveCallback func(conn ConnMetadata, client KeyboardInteractiveChallenge) (*Permissions, error)
65
66 // AuthLogCallback, if non-nil, is called to log all authentication
67 // attempts.
68 AuthLogCallback func(conn ConnMetadata, method string, err error)
69
70 // ServerVersion is the version identification string to announce in
71 // the public handshake.
72 // If empty, a reasonable default is used.
73 // Note that RFC 4253 section 4.2 requires that this string start with
74 // "SSH-2.0-".
75 ServerVersion string
76}
77
78// AddHostKey adds a private key as a host key. If an existing host
79// key exists with the same algorithm, it is overwritten. Each server
80// config must have at least one host key.
81func (s *ServerConfig) AddHostKey(key Signer) {
82 for i, k := range s.hostKeys {
83 if k.PublicKey().Type() == key.PublicKey().Type() {
84 s.hostKeys[i] = key
85 return
86 }
87 }
88
89 s.hostKeys = append(s.hostKeys, key)
90}
91
92// cachedPubKey contains the results of querying whether a public key is
93// acceptable for a user.
94type cachedPubKey struct {
95 user string
96 pubKeyData []byte
97 result error
98 perms *Permissions
99}
100
101const maxCachedPubKeys = 16
102
103// pubKeyCache caches tests for public keys. Since SSH clients
104// will query whether a public key is acceptable before attempting to
105// authenticate with it, we end up with duplicate queries for public
106// key validity. The cache only applies to a single ServerConn.
107type pubKeyCache struct {
108 keys []cachedPubKey
109}
110
111// get returns the result for a given user/algo/key tuple.
112func (c *pubKeyCache) get(user string, pubKeyData []byte) (cachedPubKey, bool) {
113 for _, k := range c.keys {
114 if k.user == user && bytes.Equal(k.pubKeyData, pubKeyData) {
115 return k, true
116 }
117 }
118 return cachedPubKey{}, false
119}
120
121// add adds the given tuple to the cache.
122func (c *pubKeyCache) add(candidate cachedPubKey) {
123 if len(c.keys) < maxCachedPubKeys {
124 c.keys = append(c.keys, candidate)
125 }
126}
127
128// ServerConn is an authenticated SSH connection, as seen from the
129// server
130type ServerConn struct {
131 Conn
132
 133	// If the authentication callback that succeeded returned a
 134	// non-nil Permissions pointer, it is stored here.
135 Permissions *Permissions
136}
137
138// NewServerConn starts a new SSH server with c as the underlying
139// transport. It starts with a handshake and, if the handshake is
140// unsuccessful, it closes the connection and returns an error. The
141// Request and NewChannel channels must be serviced, or the connection
142// will hang.
143func NewServerConn(c net.Conn, config *ServerConfig) (*ServerConn, <-chan NewChannel, <-chan *Request, error) {
144 fullConf := *config
145 fullConf.SetDefaults()
146 s := &connection{
147 sshConn: sshConn{conn: c},
148 }
149 perms, err := s.serverHandshake(&fullConf)
150 if err != nil {
151 c.Close()
152 return nil, nil, nil, err
153 }
154 return &ServerConn{s, perms}, s.mux.incomingChannels, s.mux.incomingRequests, nil
155}
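
A minimal server sketch tying ServerConfig, AddHostKey and NewServerConn together. The host key path and credentials are hypothetical, and the channel loop simply rejects everything; note that both returned channels must be serviced, as the doc comment above warns.

package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"net"

	"golang.org/x/crypto/ssh"
)

func main() {
	config := &ssh.ServerConfig{
		// Hypothetical check; a real server would verify a stored hash.
		PasswordCallback: func(c ssh.ConnMetadata, pass []byte) (*ssh.Permissions, error) {
			if c.User() == "demo" && string(pass) == "secret" {
				return nil, nil
			}
			return nil, fmt.Errorf("password rejected for %q", c.User())
		},
	}

	keyBytes, err := ioutil.ReadFile("host_key") // hypothetical path
	if err != nil {
		log.Fatal(err)
	}
	hostKey, err := ssh.ParsePrivateKey(keyBytes)
	if err != nil {
		log.Fatal(err)
	}
	config.AddHostKey(hostKey)

	ln, err := net.Listen("tcp", ":2022")
	if err != nil {
		log.Fatal(err)
	}
	for {
		nConn, err := ln.Accept()
		if err != nil {
			log.Fatal(err)
		}
		go func(c net.Conn) {
			_, chans, reqs, err := ssh.NewServerConn(c, config)
			if err != nil {
				log.Print(err)
				return
			}
			// Both channels must be serviced or the connection hangs.
			go ssh.DiscardRequests(reqs)
			for newChan := range chans {
				newChan.Reject(ssh.UnknownChannelType, "unsupported")
			}
		}(nConn)
	}
}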
156
157// signAndMarshal signs the data with the appropriate algorithm,
158// and serializes the result in SSH wire format.
159func signAndMarshal(k Signer, rand io.Reader, data []byte) ([]byte, error) {
160 sig, err := k.Sign(rand, data)
161 if err != nil {
162 return nil, err
163 }
164
165 return Marshal(sig), nil
166}
167
168// handshake performs key exchange and user authentication.
169func (s *connection) serverHandshake(config *ServerConfig) (*Permissions, error) {
170 if len(config.hostKeys) == 0 {
171 return nil, errors.New("ssh: server has no host keys")
172 }
173
174 if !config.NoClientAuth && config.PasswordCallback == nil && config.PublicKeyCallback == nil && config.KeyboardInteractiveCallback == nil {
175 return nil, errors.New("ssh: no authentication methods configured but NoClientAuth is also false")
176 }
177
178 if config.ServerVersion != "" {
179 s.serverVersion = []byte(config.ServerVersion)
180 } else {
181 s.serverVersion = []byte(packageVersion)
182 }
183 var err error
184 s.clientVersion, err = exchangeVersions(s.sshConn.conn, s.serverVersion)
185 if err != nil {
186 return nil, err
187 }
188
189 tr := newTransport(s.sshConn.conn, config.Rand, false /* not client */)
190 s.transport = newServerTransport(tr, s.clientVersion, s.serverVersion, config)
191
192 if err := s.transport.waitSession(); err != nil {
193 return nil, err
194 }
195
196 // We just did the key change, so the session ID is established.
197 s.sessionID = s.transport.getSessionID()
198
199 var packet []byte
200 if packet, err = s.transport.readPacket(); err != nil {
201 return nil, err
202 }
203
204 var serviceRequest serviceRequestMsg
205 if err = Unmarshal(packet, &serviceRequest); err != nil {
206 return nil, err
207 }
208 if serviceRequest.Service != serviceUserAuth {
209 return nil, errors.New("ssh: requested service '" + serviceRequest.Service + "' before authenticating")
210 }
211 serviceAccept := serviceAcceptMsg{
212 Service: serviceUserAuth,
213 }
214 if err := s.transport.writePacket(Marshal(&serviceAccept)); err != nil {
215 return nil, err
216 }
217
218 perms, err := s.serverAuthenticate(config)
219 if err != nil {
220 return nil, err
221 }
222 s.mux = newMux(s.transport)
223 return perms, err
224}
225
226func isAcceptableAlgo(algo string) bool {
227 switch algo {
228 case KeyAlgoRSA, KeyAlgoDSA, KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521, KeyAlgoED25519,
229 CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, CertAlgoECDSA384v01, CertAlgoECDSA521v01:
230 return true
231 }
232 return false
233}
234
235func checkSourceAddress(addr net.Addr, sourceAddrs string) error {
236 if addr == nil {
237 return errors.New("ssh: no address known for client, but source-address match required")
238 }
239
240 tcpAddr, ok := addr.(*net.TCPAddr)
241 if !ok {
 242		return fmt.Errorf("ssh: remote address %v is not a TCP address when checking source-address match", addr)
243 }
244
245 for _, sourceAddr := range strings.Split(sourceAddrs, ",") {
246 if allowedIP := net.ParseIP(sourceAddr); allowedIP != nil {
247 if allowedIP.Equal(tcpAddr.IP) {
248 return nil
249 }
250 } else {
251 _, ipNet, err := net.ParseCIDR(sourceAddr)
252 if err != nil {
253 return fmt.Errorf("ssh: error parsing source-address restriction %q: %v", sourceAddr, err)
254 }
255
256 if ipNet.Contains(tcpAddr.IP) {
257 return nil
258 }
259 }
260 }
261
262 return fmt.Errorf("ssh: remote address %v is not allowed because of source-address restriction", addr)
263}
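
Because serverAuthenticate runs this check after the callback returns, a PublicKeyCallback can pin a key to a network simply by setting the "source-address" critical option. A hypothetical fragment, assuming config is an *ssh.ServerConfig:

// A hypothetical callback that accepts the key only from listed addresses;
// checkSourceAddress enforces the restriction automatically.
config.PublicKeyCallback = func(c ssh.ConnMetadata, key ssh.PublicKey) (*ssh.Permissions, error) {
	return &ssh.Permissions{
		CriticalOptions: map[string]string{
			"source-address": "203.0.113.0/24,198.51.100.7",
		},
	}, nil
}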
264
265func (s *connection) serverAuthenticate(config *ServerConfig) (*Permissions, error) {
266 sessionID := s.transport.getSessionID()
267 var cache pubKeyCache
268 var perms *Permissions
269
270userAuthLoop:
271 for {
272 var userAuthReq userAuthRequestMsg
273 if packet, err := s.transport.readPacket(); err != nil {
274 return nil, err
275 } else if err = Unmarshal(packet, &userAuthReq); err != nil {
276 return nil, err
277 }
278
279 if userAuthReq.Service != serviceSSH {
280 return nil, errors.New("ssh: client attempted to negotiate for unknown service: " + userAuthReq.Service)
281 }
282
283 s.user = userAuthReq.User
284 perms = nil
285 authErr := errors.New("no auth passed yet")
286
287 switch userAuthReq.Method {
288 case "none":
289 if config.NoClientAuth {
290 authErr = nil
291 }
292 case "password":
293 if config.PasswordCallback == nil {
294 authErr = errors.New("ssh: password auth not configured")
295 break
296 }
297 payload := userAuthReq.Payload
298 if len(payload) < 1 || payload[0] != 0 {
299 return nil, parseError(msgUserAuthRequest)
300 }
301 payload = payload[1:]
302 password, payload, ok := parseString(payload)
303 if !ok || len(payload) > 0 {
304 return nil, parseError(msgUserAuthRequest)
305 }
306
307 perms, authErr = config.PasswordCallback(s, password)
308 case "keyboard-interactive":
309 if config.KeyboardInteractiveCallback == nil {
 310				authErr = errors.New("ssh: keyboard-interactive auth not configured")
311 break
312 }
313
314 prompter := &sshClientKeyboardInteractive{s}
315 perms, authErr = config.KeyboardInteractiveCallback(s, prompter.Challenge)
316 case "publickey":
317 if config.PublicKeyCallback == nil {
318 authErr = errors.New("ssh: publickey auth not configured")
319 break
320 }
321 payload := userAuthReq.Payload
322 if len(payload) < 1 {
323 return nil, parseError(msgUserAuthRequest)
324 }
325 isQuery := payload[0] == 0
326 payload = payload[1:]
327 algoBytes, payload, ok := parseString(payload)
328 if !ok {
329 return nil, parseError(msgUserAuthRequest)
330 }
331 algo := string(algoBytes)
332 if !isAcceptableAlgo(algo) {
333 authErr = fmt.Errorf("ssh: algorithm %q not accepted", algo)
334 break
335 }
336
337 pubKeyData, payload, ok := parseString(payload)
338 if !ok {
339 return nil, parseError(msgUserAuthRequest)
340 }
341
342 pubKey, err := ParsePublicKey(pubKeyData)
343 if err != nil {
344 return nil, err
345 }
346
347 candidate, ok := cache.get(s.user, pubKeyData)
348 if !ok {
349 candidate.user = s.user
350 candidate.pubKeyData = pubKeyData
351 candidate.perms, candidate.result = config.PublicKeyCallback(s, pubKey)
352 if candidate.result == nil && candidate.perms != nil && candidate.perms.CriticalOptions != nil && candidate.perms.CriticalOptions[sourceAddressCriticalOption] != "" {
353 candidate.result = checkSourceAddress(
354 s.RemoteAddr(),
355 candidate.perms.CriticalOptions[sourceAddressCriticalOption])
356 }
357 cache.add(candidate)
358 }
359
360 if isQuery {
361 // The client can query if the given public key
362 // would be okay.
363 if len(payload) > 0 {
364 return nil, parseError(msgUserAuthRequest)
365 }
366
367 if candidate.result == nil {
368 okMsg := userAuthPubKeyOkMsg{
369 Algo: algo,
370 PubKey: pubKeyData,
371 }
372 if err = s.transport.writePacket(Marshal(&okMsg)); err != nil {
373 return nil, err
374 }
375 continue userAuthLoop
376 }
377 authErr = candidate.result
378 } else {
379 sig, payload, ok := parseSignature(payload)
380 if !ok || len(payload) > 0 {
381 return nil, parseError(msgUserAuthRequest)
382 }
383 // Ensure the public key algo and signature algo
384 // are supported. Compare the private key
385 // algorithm name that corresponds to algo with
386 // sig.Format. This is usually the same, but
387 // for certs, the names differ.
388 if !isAcceptableAlgo(sig.Format) {
389 break
390 }
391 signedData := buildDataSignedForAuth(sessionID, userAuthReq, algoBytes, pubKeyData)
392
393 if err := pubKey.Verify(signedData, sig); err != nil {
394 return nil, err
395 }
396
397 authErr = candidate.result
398 perms = candidate.perms
399 }
400 default:
401 authErr = fmt.Errorf("ssh: unknown method %q", userAuthReq.Method)
402 }
403
404 if config.AuthLogCallback != nil {
405 config.AuthLogCallback(s, userAuthReq.Method, authErr)
406 }
407
408 if authErr == nil {
409 break userAuthLoop
410 }
411
412 var failureMsg userAuthFailureMsg
413 if config.PasswordCallback != nil {
414 failureMsg.Methods = append(failureMsg.Methods, "password")
415 }
416 if config.PublicKeyCallback != nil {
417 failureMsg.Methods = append(failureMsg.Methods, "publickey")
418 }
419 if config.KeyboardInteractiveCallback != nil {
420 failureMsg.Methods = append(failureMsg.Methods, "keyboard-interactive")
421 }
422
423 if len(failureMsg.Methods) == 0 {
424 return nil, errors.New("ssh: no authentication methods configured but NoClientAuth is also false")
425 }
426
427 if err := s.transport.writePacket(Marshal(&failureMsg)); err != nil {
428 return nil, err
429 }
430 }
431
432 if err := s.transport.writePacket([]byte{msgUserAuthSuccess}); err != nil {
433 return nil, err
434 }
435 return perms, nil
436}
437
438// sshClientKeyboardInteractive implements a ClientKeyboardInteractive by
439// asking the client on the other side of a ServerConn.
440type sshClientKeyboardInteractive struct {
441 *connection
442}
443
444func (c *sshClientKeyboardInteractive) Challenge(user, instruction string, questions []string, echos []bool) (answers []string, err error) {
445 if len(questions) != len(echos) {
446 return nil, errors.New("ssh: echos and questions must have equal length")
447 }
448
449 var prompts []byte
450 for i := range questions {
451 prompts = appendString(prompts, questions[i])
452 prompts = appendBool(prompts, echos[i])
453 }
454
455 if err := c.transport.writePacket(Marshal(&userAuthInfoRequestMsg{
456 Instruction: instruction,
457 NumPrompts: uint32(len(questions)),
458 Prompts: prompts,
459 })); err != nil {
460 return nil, err
461 }
462
463 packet, err := c.transport.readPacket()
464 if err != nil {
465 return nil, err
466 }
467 if packet[0] != msgUserAuthInfoResponse {
468 return nil, unexpectedMessageError(msgUserAuthInfoResponse, packet[0])
469 }
470 packet = packet[1:]
471
472 n, packet, ok := parseUint32(packet)
473 if !ok || int(n) != len(questions) {
474 return nil, parseError(msgUserAuthInfoResponse)
475 }
476
477 for i := uint32(0); i < n; i++ {
478 ans, rest, ok := parseString(packet)
479 if !ok {
480 return nil, parseError(msgUserAuthInfoResponse)
481 }
482
483 answers = append(answers, string(ans))
484 packet = rest
485 }
486 if len(packet) != 0 {
487 return nil, errors.New("ssh: junk at end of message")
488 }
489
490 return answers, nil
491}
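
On the server side, this Challenge implementation is what a KeyboardInteractiveCallback receives as its prompter. A hypothetical one-question callback, assuming config is an *ssh.ServerConfig:

// One round, one hidden prompt; the token check is purely illustrative.
config.KeyboardInteractiveCallback = func(c ssh.ConnMetadata, challenge ssh.KeyboardInteractiveChallenge) (*ssh.Permissions, error) {
	answers, err := challenge(c.User(), "", []string{"Token: "}, []bool{true})
	if err != nil {
		return nil, err
	}
	if len(answers) == 1 && answers[0] == "letmein" { // hypothetical check
		return nil, nil
	}
	return nil, errors.New("invalid token")
}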
diff --git a/vendor/golang.org/x/crypto/ssh/session.go b/vendor/golang.org/x/crypto/ssh/session.go
new file mode 100644
index 0000000..17e2aa8
--- /dev/null
+++ b/vendor/golang.org/x/crypto/ssh/session.go
@@ -0,0 +1,627 @@
1// Copyright 2011 The Go Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package ssh
6
7// Session implements an interactive session described in
8// "RFC 4254, section 6".
9
10import (
11 "bytes"
12 "encoding/binary"
13 "errors"
14 "fmt"
15 "io"
16 "io/ioutil"
17 "sync"
18)
19
20type Signal string
21
22// POSIX signals as listed in RFC 4254 Section 6.10.
23const (
24 SIGABRT Signal = "ABRT"
25 SIGALRM Signal = "ALRM"
26 SIGFPE Signal = "FPE"
27 SIGHUP Signal = "HUP"
28 SIGILL Signal = "ILL"
29 SIGINT Signal = "INT"
30 SIGKILL Signal = "KILL"
31 SIGPIPE Signal = "PIPE"
32 SIGQUIT Signal = "QUIT"
33 SIGSEGV Signal = "SEGV"
34 SIGTERM Signal = "TERM"
35 SIGUSR1 Signal = "USR1"
36 SIGUSR2 Signal = "USR2"
37)
38
39var signals = map[Signal]int{
40 SIGABRT: 6,
41 SIGALRM: 14,
42 SIGFPE: 8,
43 SIGHUP: 1,
44 SIGILL: 4,
45 SIGINT: 2,
46 SIGKILL: 9,
47 SIGPIPE: 13,
48 SIGQUIT: 3,
49 SIGSEGV: 11,
50 SIGTERM: 15,
51}
52
53type TerminalModes map[uint8]uint32
54
55// POSIX terminal mode flags as listed in RFC 4254 Section 8.
56const (
57 tty_OP_END = 0
58 VINTR = 1
59 VQUIT = 2
60 VERASE = 3
61 VKILL = 4
62 VEOF = 5
63 VEOL = 6
64 VEOL2 = 7
65 VSTART = 8
66 VSTOP = 9
67 VSUSP = 10
68 VDSUSP = 11
69 VREPRINT = 12
70 VWERASE = 13
71 VLNEXT = 14
72 VFLUSH = 15
73 VSWTCH = 16
74 VSTATUS = 17
75 VDISCARD = 18
76 IGNPAR = 30
77 PARMRK = 31
78 INPCK = 32
79 ISTRIP = 33
80 INLCR = 34
81 IGNCR = 35
82 ICRNL = 36
83 IUCLC = 37
84 IXON = 38
85 IXANY = 39
86 IXOFF = 40
87 IMAXBEL = 41
88 ISIG = 50
89 ICANON = 51
90 XCASE = 52
91 ECHO = 53
92 ECHOE = 54
93 ECHOK = 55
94 ECHONL = 56
95 NOFLSH = 57
96 TOSTOP = 58
97 IEXTEN = 59
98 ECHOCTL = 60
99 ECHOKE = 61
100 PENDIN = 62
101 OPOST = 70
102 OLCUC = 71
103 ONLCR = 72
104 OCRNL = 73
105 ONOCR = 74
106 ONLRET = 75
107 CS7 = 90
108 CS8 = 91
109 PARENB = 92
110 PARODD = 93
111 TTY_OP_ISPEED = 128
112 TTY_OP_OSPEED = 129
113)
114
115// A Session represents a connection to a remote command or shell.
116type Session struct {
117 // Stdin specifies the remote process's standard input.
118 // If Stdin is nil, the remote process reads from an empty
119 // bytes.Buffer.
120 Stdin io.Reader
121
122 // Stdout and Stderr specify the remote process's standard
123 // output and error.
124 //
125 // If either is nil, Run connects the corresponding file
126 // descriptor to an instance of ioutil.Discard. There is a
127 // fixed amount of buffering that is shared for the two streams.
128 // If either blocks it may eventually cause the remote
129 // command to block.
130 Stdout io.Writer
131 Stderr io.Writer
132
133 ch Channel // the channel backing this session
134 started bool // true once Start, Run or Shell is invoked.
135 copyFuncs []func() error
136 errors chan error // one send per copyFunc
137
138 // true if pipe method is active
139 stdinpipe, stdoutpipe, stderrpipe bool
140
141 // stdinPipeWriter is non-nil if StdinPipe has not been called
142 // and Stdin was specified by the user; it is the write end of
143 // a pipe connecting Session.Stdin to the stdin channel.
144 stdinPipeWriter io.WriteCloser
145
146 exitStatus chan error
147}
148
149// SendRequest sends an out-of-band channel request on the SSH channel
150// underlying the session.
151func (s *Session) SendRequest(name string, wantReply bool, payload []byte) (bool, error) {
152 return s.ch.SendRequest(name, wantReply, payload)
153}
154
155func (s *Session) Close() error {
156 return s.ch.Close()
157}
158
159// RFC 4254 Section 6.4.
160type setenvRequest struct {
161 Name string
162 Value string
163}
164
165// Setenv sets an environment variable that will be applied to any
166// command executed by Shell or Run.
167func (s *Session) Setenv(name, value string) error {
168 msg := setenvRequest{
169 Name: name,
170 Value: value,
171 }
172 ok, err := s.ch.SendRequest("env", true, Marshal(&msg))
173 if err == nil && !ok {
174 err = errors.New("ssh: setenv failed")
175 }
176 return err
177}
178
179// RFC 4254 Section 6.2.
180type ptyRequestMsg struct {
181 Term string
182 Columns uint32
183 Rows uint32
184 Width uint32
185 Height uint32
186 Modelist string
187}
188
189// RequestPty requests the association of a pty with the session on the remote host.
190func (s *Session) RequestPty(term string, h, w int, termmodes TerminalModes) error {
191 var tm []byte
192 for k, v := range termmodes {
193 kv := struct {
194 Key byte
195 Val uint32
196 }{k, v}
197
198 tm = append(tm, Marshal(&kv)...)
199 }
200 tm = append(tm, tty_OP_END)
201 req := ptyRequestMsg{
202 Term: term,
203 Columns: uint32(w),
204 Rows: uint32(h),
205 Width: uint32(w * 8),
206 Height: uint32(h * 8),
207 Modelist: string(tm),
208 }
209 ok, err := s.ch.SendRequest("pty-req", true, Marshal(&req))
210 if err == nil && !ok {
211 err = errors.New("ssh: pty-req failed")
212 }
213 return err
214}
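
A fragment showing commonly used terminal modes, assuming session is an established *ssh.Session:

modes := ssh.TerminalModes{
	ssh.ECHO:          0,     // disable echoing
	ssh.TTY_OP_ISPEED: 14400, // input speed in baud
	ssh.TTY_OP_OSPEED: 14400, // output speed in baud
}
if err := session.RequestPty("xterm", 40, 80, modes); err != nil {
	log.Fatal(err)
}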
215
216// RFC 4254 Section 6.5.
217type subsystemRequestMsg struct {
218 Subsystem string
219}
220
221// RequestSubsystem requests the association of a subsystem with the session on the remote host.
 222// A subsystem is a predefined command that runs in the background when the SSH session is initiated.
223func (s *Session) RequestSubsystem(subsystem string) error {
224 msg := subsystemRequestMsg{
225 Subsystem: subsystem,
226 }
227 ok, err := s.ch.SendRequest("subsystem", true, Marshal(&msg))
228 if err == nil && !ok {
229 err = errors.New("ssh: subsystem request failed")
230 }
231 return err
232}
233
234// RFC 4254 Section 6.9.
235type signalMsg struct {
236 Signal string
237}
238
239// Signal sends the given signal to the remote process.
240// sig is one of the SIG* constants.
241func (s *Session) Signal(sig Signal) error {
242 msg := signalMsg{
243 Signal: string(sig),
244 }
245
246 _, err := s.ch.SendRequest("signal", false, Marshal(&msg))
247 return err
248}
249
250// RFC 4254 Section 6.5.
251type execMsg struct {
252 Command string
253}
254
255// Start runs cmd on the remote host. Typically, the remote
256// server passes cmd to the shell for interpretation.
 257// A Session only accepts one call to Run, Start, Shell, Output, or CombinedOutput.
258func (s *Session) Start(cmd string) error {
259 if s.started {
260 return errors.New("ssh: session already started")
261 }
262 req := execMsg{
263 Command: cmd,
264 }
265
266 ok, err := s.ch.SendRequest("exec", true, Marshal(&req))
267 if err == nil && !ok {
268 err = fmt.Errorf("ssh: command %v failed", cmd)
269 }
270 if err != nil {
271 return err
272 }
273 return s.start()
274}
275
276// Run runs cmd on the remote host. Typically, the remote
277// server passes cmd to the shell for interpretation.
278// A Session only accepts one call to Run, Start, Shell, Output,
279// or CombinedOutput.
280//
281// The returned error is nil if the command runs, has no problems
282// copying stdin, stdout, and stderr, and exits with a zero exit
283// status.
284//
285// If the remote server does not send an exit status, an error of type
286// *ExitMissingError is returned. If the command completes
287// unsuccessfully or is interrupted by a signal, the error is of type
288// *ExitError. Other error types may be returned for I/O problems.
289func (s *Session) Run(cmd string) error {
290 err := s.Start(cmd)
291 if err != nil {
292 return err
293 }
294 return s.Wait()
295}
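
Given the error contract above, callers usually type-switch on the result; a sketch (sess as above, command illustrative):

err := sess.Run("/usr/bin/false")
switch e := err.(type) {
case nil:
	// ran to completion with exit status 0
case *ssh.ExitError:
	log.Printf("exited with status %d, signal %q", e.ExitStatus(), e.Signal())
case *ssh.ExitMissingError:
	log.Print("session closed without an exit status")
default:
	log.Print("I/O or protocol error: ", err)
}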
296
297// Output runs cmd on the remote host and returns its standard output.
298func (s *Session) Output(cmd string) ([]byte, error) {
299 if s.Stdout != nil {
300 return nil, errors.New("ssh: Stdout already set")
301 }
302 var b bytes.Buffer
303 s.Stdout = &b
304 err := s.Run(cmd)
305 return b.Bytes(), err
306}
307
308type singleWriter struct {
309 b bytes.Buffer
310 mu sync.Mutex
311}
312
313func (w *singleWriter) Write(p []byte) (int, error) {
314 w.mu.Lock()
315 defer w.mu.Unlock()
316 return w.b.Write(p)
317}
318
319// CombinedOutput runs cmd on the remote host and returns its combined
320// standard output and standard error.
321func (s *Session) CombinedOutput(cmd string) ([]byte, error) {
322 if s.Stdout != nil {
323 return nil, errors.New("ssh: Stdout already set")
324 }
325 if s.Stderr != nil {
326 return nil, errors.New("ssh: Stderr already set")
327 }
328 var b singleWriter
329 s.Stdout = &b
330 s.Stderr = &b
331 err := s.Run(cmd)
332 return b.b.Bytes(), err
333}
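
singleWriter exists because start (below) drains stdout and stderr in two separate io.Copy goroutines and bytes.Buffer is not safe for concurrent use; the mutex serializes the writes. A usage sketch (sess as above):

out, err := sess.CombinedOutput("uname -a")
if err != nil {
	log.Fatal(err)
}
fmt.Printf("%s", out)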
334
335// Shell starts a login shell on the remote host. A Session only
336// accepts one call to Run, Start, Shell, Output, or CombinedOutput.
337func (s *Session) Shell() error {
338 if s.started {
339 return errors.New("ssh: session already started")
340 }
341
342 ok, err := s.ch.SendRequest("shell", true, nil)
343 if err == nil && !ok {
344 return errors.New("ssh: could not start shell")
345 }
346 if err != nil {
347 return err
348 }
349 return s.start()
350}
351
352func (s *Session) start() error {
353 s.started = true
354
355 type F func(*Session)
356 for _, setupFd := range []F{(*Session).stdin, (*Session).stdout, (*Session).stderr} {
357 setupFd(s)
358 }
359
360 s.errors = make(chan error, len(s.copyFuncs))
361 for _, fn := range s.copyFuncs {
362 go func(fn func() error) {
363 s.errors <- fn()
364 }(fn)
365 }
366 return nil
367}
368
369// Wait waits for the remote command to exit.
370//
371// The returned error is nil if the command runs, has no problems
372// copying stdin, stdout, and stderr, and exits with a zero exit
373// status.
374//
375// If the remote server does not send an exit status, an error of type
376// *ExitMissingError is returned. If the command completes
377// unsuccessfully or is interrupted by a signal, the error is of type
378// *ExitError. Other error types may be returned for I/O problems.
379func (s *Session) Wait() error {
380 if !s.started {
381 return errors.New("ssh: session not started")
382 }
383 waitErr := <-s.exitStatus
384
385 if s.stdinPipeWriter != nil {
386 s.stdinPipeWriter.Close()
387 }
388 var copyError error
389 for range s.copyFuncs {
390 if err := <-s.errors; err != nil && copyError == nil {
391 copyError = err
392 }
393 }
394 if waitErr != nil {
395 return waitErr
396 }
397 return copyError
398}
399
400func (s *Session) wait(reqs <-chan *Request) error {
401 wm := Waitmsg{status: -1}
402 // Wait for msg channel to be closed before returning.
403 for msg := range reqs {
404 switch msg.Type {
405 case "exit-status":
406 wm.status = int(binary.BigEndian.Uint32(msg.Payload))
407 case "exit-signal":
408 var sigval struct {
409 Signal string
410 CoreDumped bool
411 Error string
412 Lang string
413 }
414 if err := Unmarshal(msg.Payload, &sigval); err != nil {
415 return err
416 }
417
418 // Must sanitize strings?
419 wm.signal = sigval.Signal
420 wm.msg = sigval.Error
421 wm.lang = sigval.Lang
422 default:
423 // This handles keepalives and matches
424 // OpenSSH's behaviour.
425 if msg.WantReply {
426 msg.Reply(false, nil)
427 }
428 }
429 }
430 if wm.status == 0 {
431 return nil
432 }
433 if wm.status == -1 {
434 // exit-status was never sent from server
435 if wm.signal == "" {
436 // signal was not sent either. RFC 4254
437 // section 6.10 recommends against this
438 // behavior, but it is allowed, so we let
439 // clients handle it.
440 return &ExitMissingError{}
441 }
442 wm.status = 128
443 if _, ok := signals[Signal(wm.signal)]; ok {
444 wm.status += signals[Signal(wm.signal)]
445 }
446 }
447
448 return &ExitError{wm}
449}
450
451// ExitMissingError is returned if a session is torn down cleanly, but
452// the server sends no confirmation of the exit status.
453type ExitMissingError struct{}
454
455func (e *ExitMissingError) Error() string {
456 return "wait: remote command exited without exit status or exit signal"
457}
458
459func (s *Session) stdin() {
460 if s.stdinpipe {
461 return
462 }
463 var stdin io.Reader
464 if s.Stdin == nil {
465 stdin = new(bytes.Buffer)
466 } else {
467 r, w := io.Pipe()
468 go func() {
469 _, err := io.Copy(w, s.Stdin)
470 w.CloseWithError(err)
471 }()
472 stdin, s.stdinPipeWriter = r, w
473 }
474 s.copyFuncs = append(s.copyFuncs, func() error {
475 _, err := io.Copy(s.ch, stdin)
476 if err1 := s.ch.CloseWrite(); err == nil && err1 != io.EOF {
477 err = err1
478 }
479 return err
480 })
481}
482
483func (s *Session) stdout() {
484 if s.stdoutpipe {
485 return
486 }
487 if s.Stdout == nil {
488 s.Stdout = ioutil.Discard
489 }
490 s.copyFuncs = append(s.copyFuncs, func() error {
491 _, err := io.Copy(s.Stdout, s.ch)
492 return err
493 })
494}
495
496func (s *Session) stderr() {
497 if s.stderrpipe {
498 return
499 }
500 if s.Stderr == nil {
501 s.Stderr = ioutil.Discard
502 }
503 s.copyFuncs = append(s.copyFuncs, func() error {
504 _, err := io.Copy(s.Stderr, s.ch.Stderr())
505 return err
506 })
507}
508
509// sessionStdin reroutes Close to CloseWrite.
510type sessionStdin struct {
511 io.Writer
512 ch Channel
513}
514
515func (s *sessionStdin) Close() error {
516 return s.ch.CloseWrite()
517}
518
519// StdinPipe returns a pipe that will be connected to the
520// remote command's standard input when the command starts.
521func (s *Session) StdinPipe() (io.WriteCloser, error) {
522 if s.Stdin != nil {
523 return nil, errors.New("ssh: Stdin already set")
524 }
525 if s.started {
526 return nil, errors.New("ssh: StdinPipe after process started")
527 }
528 s.stdinpipe = true
529 return &sessionStdin{s.ch, s.ch}, nil
530}
531
532// StdoutPipe returns a pipe that will be connected to the
533// remote command's standard output when the command starts.
534// There is a fixed amount of buffering that is shared between
535// stdout and stderr streams. If the StdoutPipe reader is
536// not serviced fast enough it may eventually cause the
537// remote command to block.
538func (s *Session) StdoutPipe() (io.Reader, error) {
539 if s.Stdout != nil {
540 return nil, errors.New("ssh: Stdout already set")
541 }
542 if s.started {
543 return nil, errors.New("ssh: StdoutPipe after process started")
544 }
545 s.stdoutpipe = true
546 return s.ch, nil
547}
548
549// StderrPipe returns a pipe that will be connected to the
550// remote command's standard error when the command starts.
551// There is a fixed amount of buffering that is shared between
552// stdout and stderr streams. If the StderrPipe reader is
553// not serviced fast enough it may eventually cause the
554// remote command to block.
555func (s *Session) StderrPipe() (io.Reader, error) {
556 if s.Stderr != nil {
557 return nil, errors.New("ssh: Stderr already set")
558 }
559 if s.started {
560 return nil, errors.New("ssh: StderrPipe after process started")
561 }
562 s.stderrpipe = true
563 return s.ch.Stderr(), nil
564}
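
A streaming sketch (sess as above, command hypothetical). As the comments above warn, the two pipes share the channel's flow-control window, so stderr is drained concurrently to keep the remote command from blocking:

stdout, err := sess.StdoutPipe()
if err != nil {
	log.Fatal(err)
}
stderr, err := sess.StderrPipe()
if err != nil {
	log.Fatal(err)
}
if err := sess.Start("make"); err != nil {
	log.Fatal(err)
}
go io.Copy(os.Stderr, stderr) // keep the shared window from filling
io.Copy(os.Stdout, stdout)
log.Print(sess.Wait())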
565
566// newSession returns a new interactive session on the remote host.
567func newSession(ch Channel, reqs <-chan *Request) (*Session, error) {
568 s := &Session{
569 ch: ch,
570 }
571 s.exitStatus = make(chan error, 1)
572 go func() {
573 s.exitStatus <- s.wait(reqs)
574 }()
575
576 return s, nil
577}
578
579// An ExitError reports unsuccessful completion of a remote command.
580type ExitError struct {
581 Waitmsg
582}
583
584func (e *ExitError) Error() string {
585 return e.Waitmsg.String()
586}
587
588// Waitmsg stores the information about an exited remote command
589// as reported by Wait.
590type Waitmsg struct {
591 status int
592 signal string
593 msg string
594 lang string
595}
596
597// ExitStatus returns the exit status of the remote command.
598func (w Waitmsg) ExitStatus() int {
599 return w.status
600}
601
602// Signal returns the exit signal of the remote command if
603// it was terminated violently.
604func (w Waitmsg) Signal() string {
605 return w.signal
606}
607
608// Msg returns the exit message given by the remote command.
609func (w Waitmsg) Msg() string {
610 return w.msg
611}
612
613// Lang returns the language tag. See RFC 3066.
614func (w Waitmsg) Lang() string {
615 return w.lang
616}
617
618func (w Waitmsg) String() string {
619 str := fmt.Sprintf("Process exited with status %v", w.status)
620 if w.signal != "" {
621 str += fmt.Sprintf(" from signal %v", w.signal)
622 }
623 if w.msg != "" {
624 str += fmt.Sprintf(". Reason was: %v", w.msg)
625 }
626 return str
627}
diff --git a/vendor/golang.org/x/crypto/ssh/tcpip.go b/vendor/golang.org/x/crypto/ssh/tcpip.go
new file mode 100644
index 0000000..6151241
--- /dev/null
+++ b/vendor/golang.org/x/crypto/ssh/tcpip.go
@@ -0,0 +1,407 @@
1// Copyright 2011 The Go Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package ssh
6
7import (
8 "errors"
9 "fmt"
10 "io"
11 "math/rand"
12 "net"
13 "strconv"
14 "strings"
15 "sync"
16 "time"
17)
18
19// Listen requests the remote peer open a listening socket on
20// addr. Incoming connections will be available by calling Accept on
21// the returned net.Listener. The listener must be serviced, or the
22// SSH connection may hang.
23func (c *Client) Listen(n, addr string) (net.Listener, error) {
24 laddr, err := net.ResolveTCPAddr(n, addr)
25 if err != nil {
26 return nil, err
27 }
28 return c.ListenTCP(laddr)
29}
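
A remote-forwarding sketch along the lines of the package documentation (client assumed connected): the listener receives connections made to 127.0.0.1:8080 on the remote host, here answered with net/http.

l, err := client.Listen("tcp", "127.0.0.1:8080")
if err != nil {
	log.Fatal("unable to register tcp forward: ", err)
}
defer l.Close()
log.Fatal(http.Serve(l, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
	fmt.Fprint(w, "served over a reverse SSH tunnel\n")
})))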
30
31// Automatic port allocation is broken with OpenSSH before 6.0. See
32// also https://bugzilla.mindrot.org/show_bug.cgi?id=2017. In
33// particular, OpenSSH 5.9 sends a channelOpenMsg with port number 0,
34// rather than the actual port number. This means you can never open
35// two different listeners with auto allocated ports. We work around
36// this by trying explicit ports until we succeed.
37
38const openSSHPrefix = "OpenSSH_"
39
40var portRandomizer = rand.New(rand.NewSource(time.Now().UnixNano()))
41
42// isBrokenOpenSSHVersion returns true if the given version string
43// specifies a version of OpenSSH that is known to have a bug in port
44// forwarding.
45func isBrokenOpenSSHVersion(versionStr string) bool {
46 i := strings.Index(versionStr, openSSHPrefix)
47 if i < 0 {
48 return false
49 }
50 i += len(openSSHPrefix)
51 j := i
52 for ; j < len(versionStr); j++ {
53 if versionStr[j] < '0' || versionStr[j] > '9' {
54 break
55 }
56 }
57 version, _ := strconv.Atoi(versionStr[i:j])
58 return version < 6
59}
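
Tracing the parse above for a few representative version banners:

// isBrokenOpenSSHVersion("SSH-2.0-OpenSSH_5.9p1") == true  (parses 5, 5 < 6)
// isBrokenOpenSSHVersion("SSH-2.0-OpenSSH_6.0")   == false (6 is not < 6)
// isBrokenOpenSSHVersion("SSH-2.0-Go")            == false (no OpenSSH_ prefix)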
60
61// autoPortListenWorkaround simulates automatic port allocation by
62// trying random ports repeatedly.
63func (c *Client) autoPortListenWorkaround(laddr *net.TCPAddr) (net.Listener, error) {
64 var sshListener net.Listener
65 var err error
66 const tries = 10
67 for i := 0; i < tries; i++ {
68 addr := *laddr
69 addr.Port = 1024 + portRandomizer.Intn(60000)
70 sshListener, err = c.ListenTCP(&addr)
71 if err == nil {
72 laddr.Port = addr.Port
73 return sshListener, err
74 }
75 }
76 return nil, fmt.Errorf("ssh: listen on random port failed after %d tries: %v", tries, err)
77}
78
79// RFC 4254 7.1
80type channelForwardMsg struct {
81 addr string
82 rport uint32
83}
84
85// ListenTCP requests the remote peer open a listening socket
86// on laddr. Incoming connections will be available by calling
87// Accept on the returned net.Listener.
88func (c *Client) ListenTCP(laddr *net.TCPAddr) (net.Listener, error) {
89 if laddr.Port == 0 && isBrokenOpenSSHVersion(string(c.ServerVersion())) {
90 return c.autoPortListenWorkaround(laddr)
91 }
92
93 m := channelForwardMsg{
94 laddr.IP.String(),
95 uint32(laddr.Port),
96 }
97 // send message
98 ok, resp, err := c.SendRequest("tcpip-forward", true, Marshal(&m))
99 if err != nil {
100 return nil, err
101 }
102 if !ok {
103 return nil, errors.New("ssh: tcpip-forward request denied by peer")
104 }
105
106 // If the original port was 0, then the remote side will
107 // supply a real port number in the response.
108 if laddr.Port == 0 {
109 var p struct {
110 Port uint32
111 }
112 if err := Unmarshal(resp, &p); err != nil {
113 return nil, err
114 }
115 laddr.Port = int(p.Port)
116 }
117
118 // Register this forward, using the port number we obtained.
119 ch := c.forwards.add(*laddr)
120
121 return &tcpListener{laddr, c, ch}, nil
122}
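
When laddr.Port is 0, the server-chosen port is written back into laddr, so the caller can recover it; a sketch (client assumed connected):

laddr := &net.TCPAddr{IP: net.IPv4zero, Port: 0}
l, err := client.ListenTCP(laddr)
if err != nil {
	log.Fatal(err)
}
// laddr.Port now holds the port the server picked (or the one found by the
// pre-6.0 OpenSSH workaround above); l.Addr() reports the same address.
log.Printf("forwarding from remote port %d", laddr.Port)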
123
124// forwardList stores a mapping between remote
125// forward requests and the tcpListeners.
126type forwardList struct {
127 sync.Mutex
128 entries []forwardEntry
129}
130
131// forwardEntry represents an established mapping of a laddr on a
132// remote ssh server to a channel connected to a tcpListener.
133type forwardEntry struct {
134 laddr net.TCPAddr
135 c chan forward
136}
137
138// forward represents an incoming forwarded tcpip connection. The
139// arguments to add/remove/forward should be the address as specified in
140// the original forward-request.
141type forward struct {
142 newCh NewChannel // the ssh client channel underlying this forward
143 raddr *net.TCPAddr // the raddr of the incoming connection
144}
145
146func (l *forwardList) add(addr net.TCPAddr) chan forward {
147 l.Lock()
148 defer l.Unlock()
149 f := forwardEntry{
150 addr,
151 make(chan forward, 1),
152 }
153 l.entries = append(l.entries, f)
154 return f.c
155}
156
157// See RFC 4254, section 7.2
158type forwardedTCPPayload struct {
159 Addr string
160 Port uint32
161 OriginAddr string
162 OriginPort uint32
163}
164
165// parseTCPAddr parses the originating address from the remote into a *net.TCPAddr.
166func parseTCPAddr(addr string, port uint32) (*net.TCPAddr, error) {
167 if port == 0 || port > 65535 {
168 return nil, fmt.Errorf("ssh: port number out of range: %d", port)
169 }
170 ip := net.ParseIP(addr)
171 if ip == nil {
172 return nil, fmt.Errorf("ssh: cannot parse IP address %q", addr)
173 }
174 return &net.TCPAddr{IP: ip, Port: int(port)}, nil
175}
176
177func (l *forwardList) handleChannels(in <-chan NewChannel) {
178 for ch := range in {
179 var payload forwardedTCPPayload
180 if err := Unmarshal(ch.ExtraData(), &payload); err != nil {
181 ch.Reject(ConnectionFailed, "could not parse forwarded-tcpip payload: "+err.Error())
182 continue
183 }
184
185 // RFC 4254 section 7.2 specifies that incoming
186 // addresses should list the address, in string
187 // format. It is implied that this should be an IP
188 // address, as it would be impossible to connect to it
189 // otherwise.
190 laddr, err := parseTCPAddr(payload.Addr, payload.Port)
191 if err != nil {
192 ch.Reject(ConnectionFailed, err.Error())
193 continue
194 }
195 raddr, err := parseTCPAddr(payload.OriginAddr, payload.OriginPort)
196 if err != nil {
197 ch.Reject(ConnectionFailed, err.Error())
198 continue
199 }
200
201 if ok := l.forward(*laddr, *raddr, ch); !ok {
202 // RFC 4254 section 7.2: implementations MUST reject spurious incoming
203 // connections.
204 ch.Reject(Prohibited, "no forward for address")
205 continue
206 }
207 }
208}
209
210// remove removes the forward entry, and the channel feeding its
211// listener.
212func (l *forwardList) remove(addr net.TCPAddr) {
213 l.Lock()
214 defer l.Unlock()
215 for i, f := range l.entries {
216 if addr.IP.Equal(f.laddr.IP) && addr.Port == f.laddr.Port {
217 l.entries = append(l.entries[:i], l.entries[i+1:]...)
218 close(f.c)
219 return
220 }
221 }
222}
223
224// closeAll closes and clears all forwards.
225func (l *forwardList) closeAll() {
226 l.Lock()
227 defer l.Unlock()
228 for _, f := range l.entries {
229 close(f.c)
230 }
231 l.entries = nil
232}
233
234func (l *forwardList) forward(laddr, raddr net.TCPAddr, ch NewChannel) bool {
235 l.Lock()
236 defer l.Unlock()
237 for _, f := range l.entries {
238 if laddr.IP.Equal(f.laddr.IP) && laddr.Port == f.laddr.Port {
239 f.c <- forward{ch, &raddr}
240 return true
241 }
242 }
243 return false
244}
245
246type tcpListener struct {
247 laddr *net.TCPAddr
248
249 conn *Client
250 in <-chan forward
251}
252
253// Accept waits for and returns the next connection to the listener.
254func (l *tcpListener) Accept() (net.Conn, error) {
255 s, ok := <-l.in
256 if !ok {
257 return nil, io.EOF
258 }
259 ch, incoming, err := s.newCh.Accept()
260 if err != nil {
261 return nil, err
262 }
263 go DiscardRequests(incoming)
264
265 return &tcpChanConn{
266 Channel: ch,
267 laddr: l.laddr,
268 raddr: s.raddr,
269 }, nil
270}
271
272// Close closes the listener.
273func (l *tcpListener) Close() error {
274 m := channelForwardMsg{
275 l.laddr.IP.String(),
276 uint32(l.laddr.Port),
277 }
278
279 // Removing the forward also closes the channel feeding the listener.
280 l.conn.forwards.remove(*l.laddr)
281 ok, _, err := l.conn.SendRequest("cancel-tcpip-forward", true, Marshal(&m))
282 if err == nil && !ok {
283 err = errors.New("ssh: cancel-tcpip-forward failed")
284 }
285 return err
286}
287
288// Addr returns the listener's network address.
289func (l *tcpListener) Addr() net.Addr {
290 return l.laddr
291}
292
293// Dial initiates a connection to the addr from the remote host.
294// The resulting connection has a zero LocalAddr() and RemoteAddr().
295func (c *Client) Dial(n, addr string) (net.Conn, error) {
296 // Parse the address into host and numeric port.
297 host, portString, err := net.SplitHostPort(addr)
298 if err != nil {
299 return nil, err
300 }
301 port, err := strconv.ParseUint(portString, 10, 16)
302 if err != nil {
303 return nil, err
304 }
305 // Use a zero address for local and remote address.
306 zeroAddr := &net.TCPAddr{
307 IP: net.IPv4zero,
308 Port: 0,
309 }
310 ch, err := c.dial(net.IPv4zero.String(), 0, host, int(port))
311 if err != nil {
312 return nil, err
313 }
314 return &tcpChanConn{
315 Channel: ch,
316 laddr: zeroAddr,
317 raddr: zeroAddr,
318 }, nil
319}
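
A tunnelling sketch (client assumed connected; host and port hypothetical). The TCP connection originates from the remote SSH server, and, as the deadline methods below make explicit, the returned net.Conn does not support deadlines:

conn, err := client.Dial("tcp", "db.internal:5432")
if err != nil {
	log.Fatal(err)
}
defer conn.Close()
// conn now behaves like a TCP connection dialed from the remote server.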
320
321// DialTCP connects to the remote address raddr on the network n,
322// which must be "tcp", "tcp4", or "tcp6". If laddr is not nil, it is used
323// as the local address for the connection.
324func (c *Client) DialTCP(n string, laddr, raddr *net.TCPAddr) (net.Conn, error) {
325 if laddr == nil {
326 laddr = &net.TCPAddr{
327 IP: net.IPv4zero,
328 Port: 0,
329 }
330 }
331 ch, err := c.dial(laddr.IP.String(), laddr.Port, raddr.IP.String(), raddr.Port)
332 if err != nil {
333 return nil, err
334 }
335 return &tcpChanConn{
336 Channel: ch,
337 laddr: laddr,
338 raddr: raddr,
339 }, nil
340}
341
342// RFC 4254 7.2
343type channelOpenDirectMsg struct {
344 raddr string
345 rport uint32
346 laddr string
347 lport uint32
348}
349
350func (c *Client) dial(laddr string, lport int, raddr string, rport int) (Channel, error) {
351 msg := channelOpenDirectMsg{
352 raddr: raddr,
353 rport: uint32(rport),
354 laddr: laddr,
355 lport: uint32(lport),
356 }
357 ch, in, err := c.OpenChannel("direct-tcpip", Marshal(&msg))
358 if err != nil {
359 return nil, err
360 }
361 go DiscardRequests(in)
362 return ch, err
363}
364
365type tcpChan struct {
366 Channel // the backing channel
367}
368
369// tcpChanConn fulfills the net.Conn interface without
370// the tcpChan having to hold laddr or raddr directly.
371type tcpChanConn struct {
372 Channel
373 laddr, raddr net.Addr
374}
375
376// LocalAddr returns the local network address.
377func (t *tcpChanConn) LocalAddr() net.Addr {
378 return t.laddr
379}
380
381// RemoteAddr returns the remote network address.
382func (t *tcpChanConn) RemoteAddr() net.Addr {
383 return t.raddr
384}
385
386// SetDeadline sets the read and write deadlines associated
387// with the connection.
388func (t *tcpChanConn) SetDeadline(deadline time.Time) error {
389 if err := t.SetReadDeadline(deadline); err != nil {
390 return err
391 }
392 return t.SetWriteDeadline(deadline)
393}
394
395// SetReadDeadline sets the read deadline, and exists to satisfy the
396// net.Conn interface, but is not implemented by this type. It always
397// returns an error, so callers that need read timeouts must enforce
398// them by other means.
399func (t *tcpChanConn) SetReadDeadline(deadline time.Time) error {
400 return errors.New("ssh: tcpChan: deadline not supported")
401}
402
403// SetWriteDeadline exists to satisfy the net.Conn interface
404// but is not implemented by this type. It always returns an error.
405func (t *tcpChanConn) SetWriteDeadline(deadline time.Time) error {
406 return errors.New("ssh: tcpChan: deadline not supported")
407}
diff --git a/vendor/golang.org/x/crypto/ssh/transport.go b/vendor/golang.org/x/crypto/ssh/transport.go
new file mode 100644
index 0000000..f9780e0
--- /dev/null
+++ b/vendor/golang.org/x/crypto/ssh/transport.go
@@ -0,0 +1,375 @@
1// Copyright 2011 The Go Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package ssh
6
7import (
8 "bufio"
9 "errors"
10 "io"
11 "log"
12)
13
14// debugTransport, if set, will print packet types as they go over the
15// wire. No message decoding is done, to minimize the impact on timing.
16const debugTransport = false
17
18const (
19 gcmCipherID = "aes128-gcm@openssh.com"
20 aes128cbcID = "aes128-cbc"
21 tripledescbcID = "3des-cbc"
22)
23
24// packetConn represents a transport that implements packet-based
25// operations.
26type packetConn interface {
27 // Encrypt and send a packet of data to the remote peer.
28 writePacket(packet []byte) error
29
30 // Read a packet from the connection. The read is blocking,
31 // i.e. if error is nil, then the returned byte slice is
32 // always non-empty.
33 readPacket() ([]byte, error)
34
35 // Close closes the write-side of the connection.
36 Close() error
37}
38
39// transport is the keyingTransport that implements the SSH packet
40// protocol.
41type transport struct {
42 reader connectionState
43 writer connectionState
44
45 bufReader *bufio.Reader
46 bufWriter *bufio.Writer
47 rand io.Reader
48 isClient bool
49 io.Closer
50}
51
52// packetCipher represents a combination of SSH encryption and MAC
53// protocols. A single instance should be used for one direction only.
54type packetCipher interface {
55 // writePacket encrypts the packet and writes it to w. The
56 // contents of the packet are generally scrambled.
57 writePacket(seqnum uint32, w io.Writer, rand io.Reader, packet []byte) error
58
59 // readPacket reads and decrypts a packet of data. The
60 // returned packet may be overwritten by future calls of
61 // readPacket.
62 readPacket(seqnum uint32, r io.Reader) ([]byte, error)
63}
64
65// connectionState represents one side (read or write) of the
66// connection. This is necessary because each direction has its own
67// keys, and can even have its own algorithms.
68type connectionState struct {
69 packetCipher
70 seqNum uint32
71 dir direction
72 pendingKeyChange chan packetCipher
73}
74
75// prepareKeyChange sets up key material for a keychange. The key changes in
76// both directions are triggered by reading and writing a msgNewKeys packet
77// respectively.
78func (t *transport) prepareKeyChange(algs *algorithms, kexResult *kexResult) error {
79 if ciph, err := newPacketCipher(t.reader.dir, algs.r, kexResult); err != nil {
80 return err
81 } else {
82 t.reader.pendingKeyChange <- ciph
83 }
84
85 if ciph, err := newPacketCipher(t.writer.dir, algs.w, kexResult); err != nil {
86 return err
87 } else {
88 t.writer.pendingKeyChange <- ciph
89 }
90
91 return nil
92}
93
94func (t *transport) printPacket(p []byte, write bool) {
95 if len(p) == 0 {
96 return
97 }
98 who := "server"
99 if t.isClient {
100 who = "client"
101 }
102 what := "read"
103 if write {
104 what = "write"
105 }
106
107 log.Println(what, who, p[0])
108}
109
110// readPacket reads and decrypts the next packet, transparently skipping msgIgnore and msgDebug messages.
111func (t *transport) readPacket() (p []byte, err error) {
112 for {
113 p, err = t.reader.readPacket(t.bufReader)
114 if err != nil {
115 break
116 }
117 if len(p) == 0 || (p[0] != msgIgnore && p[0] != msgDebug) {
118 break
119 }
120 }
121 if debugTransport {
122 t.printPacket(p, false)
123 }
124
125 return p, err
126}
127
128func (s *connectionState) readPacket(r *bufio.Reader) ([]byte, error) {
129 packet, err := s.packetCipher.readPacket(s.seqNum, r)
130 s.seqNum++
131 if err == nil && len(packet) == 0 {
132 err = errors.New("ssh: zero length packet")
133 }
134
135 if len(packet) > 0 {
136 switch packet[0] {
137 case msgNewKeys:
138 select {
139 case cipher := <-s.pendingKeyChange:
140 s.packetCipher = cipher
141 default:
142 return nil, errors.New("ssh: got bogus newkeys message")
143 }
144
145 case msgDisconnect:
146 // Transform a disconnect message into an
147 // error. Since this is the lowest level at which
148 // we interpret message types, doing it here
149 // ensures that we don't have to handle it
150 // elsewhere.
151 var msg disconnectMsg
152 if err := Unmarshal(packet, &msg); err != nil {
153 return nil, err
154 }
155 return nil, &msg
156 }
157 }
158
159 // The packet may point to an internal buffer, so copy the
160 // packet out here.
161 fresh := make([]byte, len(packet))
162 copy(fresh, packet)
163
164 return fresh, err
165}
166
167func (t *transport) writePacket(packet []byte) error {
168 if debugTransport {
169 t.printPacket(packet, true)
170 }
171 return t.writer.writePacket(t.bufWriter, t.rand, packet)
172}
173
174func (s *connectionState) writePacket(w *bufio.Writer, rand io.Reader, packet []byte) error {
175 changeKeys := len(packet) > 0 && packet[0] == msgNewKeys
176
177 err := s.packetCipher.writePacket(s.seqNum, w, rand, packet)
178 if err != nil {
179 return err
180 }
181 if err = w.Flush(); err != nil {
182 return err
183 }
184 s.seqNum++
185 if changeKeys {
186 select {
187 case cipher := <-s.pendingKeyChange:
188 s.packetCipher = cipher
189 default:
190 panic("ssh: no key material for msgNewKeys")
191 }
192 }
193 return err
194}
195
196func newTransport(rwc io.ReadWriteCloser, rand io.Reader, isClient bool) *transport {
197 t := &transport{
198 bufReader: bufio.NewReader(rwc),
199 bufWriter: bufio.NewWriter(rwc),
200 rand: rand,
201 reader: connectionState{
202 packetCipher: &streamPacketCipher{cipher: noneCipher{}},
203 pendingKeyChange: make(chan packetCipher, 1),
204 },
205 writer: connectionState{
206 packetCipher: &streamPacketCipher{cipher: noneCipher{}},
207 pendingKeyChange: make(chan packetCipher, 1),
208 },
209 Closer: rwc,
210 }
211 t.isClient = isClient
212
213 if isClient {
214 t.reader.dir = serverKeys
215 t.writer.dir = clientKeys
216 } else {
217 t.reader.dir = clientKeys
218 t.writer.dir = serverKeys
219 }
220
221 return t
222}
223
224type direction struct {
225 ivTag []byte
226 keyTag []byte
227 macKeyTag []byte
228}
229
230var (
231 serverKeys = direction{[]byte{'B'}, []byte{'D'}, []byte{'F'}}
232 clientKeys = direction{[]byte{'A'}, []byte{'C'}, []byte{'E'}}
233)
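
For reference, the single-letter tags come directly from RFC 4253 section 7.2:

// 'A' = IV client-to-server             'B' = IV server-to-client
// 'C' = encryption key client-to-server 'D' = encryption key server-to-client
// 'E' = MAC key client-to-server        'F' = MAC key server-to-client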
234
235// generateKeys generates key material for IV, MAC and encryption.
236func generateKeys(d direction, algs directionAlgorithms, kex *kexResult) (iv, key, macKey []byte) {
237 cipherMode := cipherModes[algs.Cipher]
238 macMode := macModes[algs.MAC]
239
240 iv = make([]byte, cipherMode.ivSize)
241 key = make([]byte, cipherMode.keySize)
242 macKey = make([]byte, macMode.keySize)
243
244 generateKeyMaterial(iv, d.ivTag, kex)
245 generateKeyMaterial(key, d.keyTag, kex)
246 generateKeyMaterial(macKey, d.macKeyTag, kex)
247 return
248}
249
250// newPacketCipher creates a packetCipher with cipher and MAC keys derived
251// from kex.K, kex.H and sessionId, as described in RFC 4253, section 6.4.
252// d should be serverKeys (server->client keys) or clientKeys (client->server).
253func newPacketCipher(d direction, algs directionAlgorithms, kex *kexResult) (packetCipher, error) {
254 iv, key, macKey := generateKeys(d, algs, kex)
255
256 if algs.Cipher == gcmCipherID {
257 return newGCMCipher(iv, key, macKey)
258 }
259
260 if algs.Cipher == aes128cbcID {
261 return newAESCBCCipher(iv, key, macKey, algs)
262 }
263
264 if algs.Cipher == tripledescbcID {
265 return newTripleDESCBCCipher(iv, key, macKey, algs)
266 }
267
268 c := &streamPacketCipher{
269 mac: macModes[algs.MAC].new(macKey),
270 etm: macModes[algs.MAC].etm,
271 }
272 c.macResult = make([]byte, c.mac.Size())
273
274 var err error
275 c.cipher, err = cipherModes[algs.Cipher].createStream(key, iv)
276 if err != nil {
277 return nil, err
278 }
279
280 return c, nil
281}
282
283// generateKeyMaterial fills the out slice with key material generated from
284// tag, K, H and sessionId, as specified in RFC 4253, section 7.2.
285func generateKeyMaterial(out, tag []byte, r *kexResult) {
286 var digestsSoFar []byte
287
288 h := r.Hash.New()
289 for len(out) > 0 {
290 h.Reset()
291 h.Write(r.K)
292 h.Write(r.H)
293
294 if len(digestsSoFar) == 0 {
295 h.Write(tag)
296 h.Write(r.SessionID)
297 } else {
298 h.Write(digestsSoFar)
299 }
300
301 digest := h.Sum(nil)
302 n := copy(out, digest)
303 out = out[n:]
304 if len(out) > 0 {
305 digestsSoFar = append(digestsSoFar, digest...)
306 }
307 }
308}
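
Spelled out, the loop above implements the key-expansion chain of RFC 4253 section 7.2:

// K1  = HASH(K || H || tag || session_id)
// K2  = HASH(K || H || K1)
// K3  = HASH(K || H || K1 || K2)
// ...
// key = K1 || K2 || K3 || ...  (truncated to the length required)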
309
310const packageVersion = "SSH-2.0-Go"
311
312// exchangeVersions sends and receives a version line. The versionLine
313// string should be US ASCII, start with "SSH-2.0-", and should not
314// include a newline. exchangeVersions returns the other side's version line.
315func exchangeVersions(rw io.ReadWriter, versionLine []byte) (them []byte, err error) {
316 // Contrary to the RFC, we do not ignore lines that don't
317 // start with "SSH-2.0-" to make the library usable with
318 // nonconforming servers.
319 for _, c := range versionLine {
320 // The spec disallows non US-ASCII chars, and
321 // specifically forbids null chars.
322 if c < 32 {
323 return nil, errors.New("ssh: junk character in version line")
324 }
325 }
326 if _, err = rw.Write(append(versionLine, '\r', '\n')); err != nil {
327 return
328 }
329
330 them, err = readVersion(rw)
331 return them, err
332}
333
334// maxVersionStringBytes is the maximum number of bytes that we'll
335// accept as a version string. RFC 4253 section 4.2 limits this to
336// 255 chars.
337const maxVersionStringBytes = 255
338
339// readVersion reads a version string as specified by RFC 4253, section 4.2.
340func readVersion(r io.Reader) ([]byte, error) {
341 versionString := make([]byte, 0, 64)
342 var ok bool
343 var buf [1]byte
344
345 for len(versionString) < maxVersionStringBytes {
346 _, err := io.ReadFull(r, buf[:])
347 if err != nil {
348 return nil, err
349 }
350 // The RFC says that the version should be terminated with \r\n
351 // but several SSH servers actually only send a \n.
352 if buf[0] == '\n' {
353 ok = true
354 break
355 }
356
357 // non ASCII chars are disallowed, but we are lenient,
358 // since Go doesn't use null-terminated strings.
359
360 // The RFC allows a comment after a space, however,
361 // all of it (version and comments) goes into the
362 // session hash.
363 versionString = append(versionString, buf[0])
364 }
365
366 if !ok {
367 return nil, errors.New("ssh: overflow reading version string")
368 }
369
370 // There might be a '\r' on the end which we should remove.
371 if len(versionString) > 0 && versionString[len(versionString)-1] == '\r' {
372 versionString = versionString[:len(versionString)-1]
373 }
374 return versionString, nil
375}
diff --git a/vendor/vendor.json b/vendor/vendor.json
new file mode 100644
index 0000000..8377e94
--- /dev/null
+++ b/vendor/vendor.json
@@ -0,0 +1,538 @@
1{
2 "comment": "",
3 "ignore": "test",
4 "package": [
5 {
6 "checksumSHA1": "FIL83loX9V9APvGQIjJpbxq53F0=",
7 "path": "github.com/apparentlymart/go-cidr/cidr",
8 "revision": "7e4b007599d4e2076d9a81be723b3912852dda2c",
9 "revisionTime": "2017-04-18T07:21:50Z"
10 },
11 {
12 "checksumSHA1": "YKM6cWvi6ApzANaRZZJcQldOZH4=",
13 "path": "github.com/aws/aws-sdk-go/aws",
14 "revision": "49c7a5e645b5eca5aabd1fd6a676dbddaf7b2a1a",
15 "revisionTime": "2017-05-09T17:42:03Z",
16 "version": "v1.8.21",
17 "versionExact": "v1.8.21"
18 },
19 {
20 "checksumSHA1": "Y9W+4GimK4Fuxq+vyIskVYFRnX4=",
21 "path": "github.com/aws/aws-sdk-go/aws/awserr",
22 "revision": "49c7a5e645b5eca5aabd1fd6a676dbddaf7b2a1a",
23 "revisionTime": "2017-05-09T17:42:03Z",
24 "version": "v1.8.21",
25 "versionExact": "v1.8.21"
26 },
27 {
28 "checksumSHA1": "yyYr41HZ1Aq0hWc3J5ijXwYEcac=",
29 "path": "github.com/aws/aws-sdk-go/aws/awsutil",
30 "revision": "49c7a5e645b5eca5aabd1fd6a676dbddaf7b2a1a",
31 "revisionTime": "2017-05-09T17:42:03Z",
32 "version": "v1.8.21",
33 "versionExact": "v1.8.21"
34 },
35 {
36 "checksumSHA1": "lSxSARUjHuYCz1/axwEuQ7IiGxk=",
37 "path": "github.com/aws/aws-sdk-go/aws/client",
38 "revision": "49c7a5e645b5eca5aabd1fd6a676dbddaf7b2a1a",
39 "revisionTime": "2017-05-09T17:42:03Z",
40 "version": "v1.8.21",
41 "versionExact": "v1.8.21"
42 },
43 {
44 "checksumSHA1": "ieAJ+Cvp/PKv1LpUEnUXpc3OI6E=",
45 "path": "github.com/aws/aws-sdk-go/aws/client/metadata",
46 "revision": "49c7a5e645b5eca5aabd1fd6a676dbddaf7b2a1a",
47 "revisionTime": "2017-05-09T17:42:03Z",
48 "version": "v1.8.21",
49 "versionExact": "v1.8.21"
50 },
51 {
52 "checksumSHA1": "uPsFA3K/51L3fy0FgMCoSGsiAoc=",
53 "path": "github.com/aws/aws-sdk-go/aws/corehandlers",
54 "revision": "49c7a5e645b5eca5aabd1fd6a676dbddaf7b2a1a",
55 "revisionTime": "2017-05-09T17:42:03Z",
56 "version": "v1.8.21",
57 "versionExact": "v1.8.21"
58 },
59 {
60 "checksumSHA1": "F52sZ5zdDeALnul8vxcodVchWi0=",
61 "path": "github.com/aws/aws-sdk-go/aws/credentials",
62 "revision": "49c7a5e645b5eca5aabd1fd6a676dbddaf7b2a1a",
63 "revisionTime": "2017-05-09T17:42:03Z",
64 "version": "v1.8.21",
65 "versionExact": "v1.8.21"
66 },
67 {
68 "checksumSHA1": "u3GOAJLmdvbuNUeUEcZSEAOeL/0=",
69 "path": "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds",
70 "revision": "49c7a5e645b5eca5aabd1fd6a676dbddaf7b2a1a",
71 "revisionTime": "2017-05-09T17:42:03Z",
72 "version": "v1.8.21",
73 "versionExact": "v1.8.21"
74 },
75 {
76 "checksumSHA1": "NUJUTWlc1sV8b7WjfiYc4JZbXl0=",
77 "path": "github.com/aws/aws-sdk-go/aws/credentials/endpointcreds",
78 "revision": "49c7a5e645b5eca5aabd1fd6a676dbddaf7b2a1a",
79 "revisionTime": "2017-05-09T17:42:03Z",
80 "version": "v1.8.21",
81 "versionExact": "v1.8.21"
82 },
83 {
84 "checksumSHA1": "JEYqmF83O5n5bHkupAzA6STm0no=",
85 "path": "github.com/aws/aws-sdk-go/aws/credentials/stscreds",
86 "revision": "49c7a5e645b5eca5aabd1fd6a676dbddaf7b2a1a",
87 "revisionTime": "2017-05-09T17:42:03Z",
88 "version": "v1.8.21",
89 "versionExact": "v1.8.21"
90 },
91 {
92 "checksumSHA1": "k4IMA27NIDHgZgvBxrKyJy16Y20=",
93 "path": "github.com/aws/aws-sdk-go/aws/defaults",
94 "revision": "49c7a5e645b5eca5aabd1fd6a676dbddaf7b2a1a",
95 "revisionTime": "2017-05-09T17:42:03Z",
96 "version": "v1.8.21",
97 "versionExact": "v1.8.21"
98 },
99 {
100 "checksumSHA1": "/EXbk/z2TWjWc1Hvb4QYs3Wmhb8=",
101 "path": "github.com/aws/aws-sdk-go/aws/ec2metadata",
102 "revision": "49c7a5e645b5eca5aabd1fd6a676dbddaf7b2a1a",
103 "revisionTime": "2017-05-09T17:42:03Z",
104 "version": "v1.8.21",
105 "versionExact": "v1.8.21"
106 },
107 {
108 "checksumSHA1": "M3m80XNHPV23xy6lIrjxFwHyXhc=",
109 "path": "github.com/aws/aws-sdk-go/aws/endpoints",
110 "revision": "49c7a5e645b5eca5aabd1fd6a676dbddaf7b2a1a",
111 "revisionTime": "2017-05-09T17:42:03Z",
112 "version": "v1.8.21",
113 "versionExact": "v1.8.21"
114 },
115 {
116 "checksumSHA1": "exvPEmKspW+/+YOa1E+SszFf2EA=",
117 "path": "github.com/aws/aws-sdk-go/aws/request",
118 "revision": "49c7a5e645b5eca5aabd1fd6a676dbddaf7b2a1a",
119 "revisionTime": "2017-05-09T17:42:03Z",
120 "version": "v1.8.21",
121 "versionExact": "v1.8.21"
122 },
123 {
124 "checksumSHA1": "sxShwDYt1duG922FOwU0/hbu/uc=",
125 "path": "github.com/aws/aws-sdk-go/aws/session",
126 "revision": "49c7a5e645b5eca5aabd1fd6a676dbddaf7b2a1a",
127 "revisionTime": "2017-05-09T17:42:03Z",
128 "version": "v1.8.21",
129 "versionExact": "v1.8.21"
130 },
131 {
132 "checksumSHA1": "SvIsunO8D9MEKbetMENA4WRnyeE=",
133 "path": "github.com/aws/aws-sdk-go/aws/signer/v4",
134 "revision": "49c7a5e645b5eca5aabd1fd6a676dbddaf7b2a1a",
135 "revisionTime": "2017-05-09T17:42:03Z",
136 "version": "v1.8.21",
137 "versionExact": "v1.8.21"
138 },
139 {
140 "checksumSHA1": "wk7EyvDaHwb5qqoOP/4d3cV0708=",
141 "path": "github.com/aws/aws-sdk-go/private/protocol",
142 "revision": "49c7a5e645b5eca5aabd1fd6a676dbddaf7b2a1a",
143 "revisionTime": "2017-05-09T17:42:03Z",
144 "version": "v1.8.21",
145 "versionExact": "v1.8.21"
146 },
147 {
148 "checksumSHA1": "ZqY5RWavBLWTo6j9xqdyBEaNFRk=",
149 "path": "github.com/aws/aws-sdk-go/private/protocol/query",
150 "revision": "49c7a5e645b5eca5aabd1fd6a676dbddaf7b2a1a",
151 "revisionTime": "2017-05-09T17:42:03Z",
152 "version": "v1.8.21",
153 "versionExact": "v1.8.21"
154 },
155 {
156 "checksumSHA1": "Drt1JfLMa0DQEZLWrnMlTWaIcC8=",
157 "path": "github.com/aws/aws-sdk-go/private/protocol/query/queryutil",
158 "revision": "49c7a5e645b5eca5aabd1fd6a676dbddaf7b2a1a",
159 "revisionTime": "2017-05-09T17:42:03Z",
160 "version": "v1.8.21",
161 "versionExact": "v1.8.21"
162 },
163 {
164 "checksumSHA1": "VCTh+dEaqqhog5ncy/WTt9+/gFM=",
165 "path": "github.com/aws/aws-sdk-go/private/protocol/rest",
166 "revision": "49c7a5e645b5eca5aabd1fd6a676dbddaf7b2a1a",
167 "revisionTime": "2017-05-09T17:42:03Z",
168 "version": "v1.8.21",
169 "versionExact": "v1.8.21"
170 },
171 {
172 "checksumSHA1": "ODo+ko8D6unAxZuN1jGzMcN4QCc=",
173 "path": "github.com/aws/aws-sdk-go/private/protocol/restxml",
174 "revision": "49c7a5e645b5eca5aabd1fd6a676dbddaf7b2a1a",
175 "revisionTime": "2017-05-09T17:42:03Z",
176 "version": "v1.8.21",
177 "versionExact": "v1.8.21"
178 },
179 {
180 "checksumSHA1": "0qYPUga28aQVkxZgBR3Z86AbGUQ=",
181 "path": "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil",
182 "revision": "49c7a5e645b5eca5aabd1fd6a676dbddaf7b2a1a",
183 "revisionTime": "2017-05-09T17:42:03Z",
184 "version": "v1.8.21",
185 "versionExact": "v1.8.21"
186 },
187 {
188 "checksumSHA1": "krqUUMDYRN2ohYcumxZl8BTR5EQ=",
189 "path": "github.com/aws/aws-sdk-go/service/s3",
190 "revision": "49c7a5e645b5eca5aabd1fd6a676dbddaf7b2a1a",
191 "revisionTime": "2017-05-09T17:42:03Z",
192 "version": "v1.8.21",
193 "versionExact": "v1.8.21"
194 },
195 {
196 "checksumSHA1": "fbROB+q5aRgvH79KOnEqr63ahRE=",
197 "path": "github.com/aws/aws-sdk-go/service/sts",
198 "revision": "49c7a5e645b5eca5aabd1fd6a676dbddaf7b2a1a",
199 "revisionTime": "2017-05-09T17:42:03Z",
200 "version": "v1.8.21",
201 "versionExact": "v1.8.21"
202 },
203 {
204 "checksumSHA1": "nqw2Qn5xUklssHTubS5HDvEL9L4=",
205 "path": "github.com/bgentry/go-netrc/netrc",
206 "revision": "9fd32a8b3d3d3f9d43c341bfe098430e07609480",
207 "revisionTime": "2014-04-22T17:41:19Z"
208 },
209 {
210 "checksumSHA1": "dvabztWVQX8f6oMLRyv4dLH+TGY=",
211 "path": "github.com/davecgh/go-spew/spew",
212 "revision": "346938d642f2ec3594ed81d874461961cd0faa76",
213 "revisionTime": "2016-10-29T20:57:26Z"
214 },
215 {
216 "checksumSHA1": "1K+xrZ1PBez190iGt5OnMtGdih4=",
217 "comment": "v1.8.6",
218 "path": "github.com/go-ini/ini",
219 "revision": "766e555c68dc8bda90d197ee8946c37519c19409",
220 "revisionTime": "2017-01-17T13:00:17Z"
221 },
222 {
223 "checksumSHA1": "cdOCt0Yb+hdErz8NAQqayxPmRsY=",
224 "path": "github.com/hashicorp/errwrap",
225 "revision": "7554cd9344cec97297fa6649b055a8c98c2a1e55"
226 },
227 {
228 "checksumSHA1": "nsL2kI426RMuq1jw15e7igFqdIY=",
229 "path": "github.com/hashicorp/go-getter",
230 "revision": "c3d66e76678dce180a7b452653472f949aedfbcd",
231 "revisionTime": "2017-02-07T21:55:32Z"
232 },
233 {
234 "checksumSHA1": "9J+kDr29yDrwsdu2ULzewmqGjpA=",
235 "path": "github.com/hashicorp/go-getter/helper/url",
236 "revision": "c3d66e76678dce180a7b452653472f949aedfbcd",
237 "revisionTime": "2017-02-07T21:55:32Z"
238 },
239 {
240 "checksumSHA1": "lrSl49G23l6NhfilxPM0XFs5rZo=",
241 "path": "github.com/hashicorp/go-multierror",
242 "revision": "d30f09973e19c1dfcd120b2d9c4f168e68d6b5d5"
243 },
244 {
245 "checksumSHA1": "b0nQutPMJHeUmz4SjpreotAo6Yk=",
246 "path": "github.com/hashicorp/go-plugin",
247 "revision": "f72692aebca2008343a9deb06ddb4b17f7051c15",
248 "revisionTime": "2017-02-17T16:27:05Z"
249 },
250 {
251 "checksumSHA1": "85XUnluYJL7F55ptcwdmN8eSOsk=",
252 "path": "github.com/hashicorp/go-uuid",
253 "revision": "36289988d83ca270bc07c234c36f364b0dd9c9a7"
254 },
255 {
256 "checksumSHA1": "EcZfls6vcqjasWV/nBlu+C+EFmc=",
257 "path": "github.com/hashicorp/go-version",
258 "revision": "e96d3840402619007766590ecea8dd7af1292276",
259 "revisionTime": "2016-10-31T18:26:05Z"
260 },
261 {
262 "checksumSHA1": "o3XZZdOnSnwQSpYw215QV75ZDeI=",
263 "path": "github.com/hashicorp/hcl",
264 "revision": "a4b07c25de5ff55ad3b8936cea69a79a3d95a855",
265 "revisionTime": "2017-05-04T19:02:34Z"
266 },
267 {
268 "checksumSHA1": "XQmjDva9JCGGkIecOgwtBEMCJhU=",
269 "path": "github.com/hashicorp/hcl/hcl/ast",
270 "revision": "a4b07c25de5ff55ad3b8936cea69a79a3d95a855",
271 "revisionTime": "2017-05-04T19:02:34Z"
272 },
273 {
274 "checksumSHA1": "teokXoyRXEJ0vZHOWBD11l5YFNI=",
275 "path": "github.com/hashicorp/hcl/hcl/parser",
276 "revision": "a4b07c25de5ff55ad3b8936cea69a79a3d95a855",
277 "revisionTime": "2017-05-04T19:02:34Z"
278 },
279 {
280 "checksumSHA1": "z6wdP4mRw4GVjShkNHDaOWkbxS0=",
281 "path": "github.com/hashicorp/hcl/hcl/scanner",
282 "revision": "a4b07c25de5ff55ad3b8936cea69a79a3d95a855",
283 "revisionTime": "2017-05-04T19:02:34Z"
284 },
285 {
286 "checksumSHA1": "oS3SCN9Wd6D8/LG0Yx1fu84a7gI=",
287 "path": "github.com/hashicorp/hcl/hcl/strconv",
288 "revision": "a4b07c25de5ff55ad3b8936cea69a79a3d95a855",
289 "revisionTime": "2017-05-04T19:02:34Z"
290 },
291 {
292 "checksumSHA1": "c6yprzj06ASwCo18TtbbNNBHljA=",
293 "path": "github.com/hashicorp/hcl/hcl/token",
294 "revision": "a4b07c25de5ff55ad3b8936cea69a79a3d95a855",
295 "revisionTime": "2017-05-04T19:02:34Z"
296 },
297 {
298 "checksumSHA1": "PwlfXt7mFS8UYzWxOK5DOq0yxS0=",
299 "path": "github.com/hashicorp/hcl/json/parser",
300 "revision": "a4b07c25de5ff55ad3b8936cea69a79a3d95a855",
301 "revisionTime": "2017-05-04T19:02:34Z"
302 },
303 {
304 "checksumSHA1": "YdvFsNOMSWMLnY6fcliWQa0O5Fw=",
305 "path": "github.com/hashicorp/hcl/json/scanner",
306 "revision": "a4b07c25de5ff55ad3b8936cea69a79a3d95a855",
307 "revisionTime": "2017-05-04T19:02:34Z"
308 },
309 {
310 "checksumSHA1": "fNlXQCQEnb+B3k5UDL/r15xtSJY=",
311 "path": "github.com/hashicorp/hcl/json/token",
312 "revision": "a4b07c25de5ff55ad3b8936cea69a79a3d95a855",
313 "revisionTime": "2017-05-04T19:02:34Z"
314 },
315 {
316 "checksumSHA1": "M09yxoBoCEtG7EcHR8aEWLzMMJc=",
317 "path": "github.com/hashicorp/hil",
318 "revision": "fac2259da677551de1fb92b844c4d020a38d8468",
319 "revisionTime": "2017-05-12T21:33:05Z"
320 },
321 {
322 "checksumSHA1": "0S0KeBcfqVFYBPeZkuJ4fhQ5mCA=",
323 "path": "github.com/hashicorp/hil/ast",
324 "revision": "fac2259da677551de1fb92b844c4d020a38d8468",
325 "revisionTime": "2017-05-12T21:33:05Z"
326 },
327 {
328 "checksumSHA1": "P5PZ3k7SmqWmxgJ8Q0gLzeNpGhE=",
329 "path": "github.com/hashicorp/hil/parser",
330 "revision": "fac2259da677551de1fb92b844c4d020a38d8468",
331 "revisionTime": "2017-05-12T21:33:05Z"
332 },
333 {
334 "checksumSHA1": "DC1k5kOua4oFqmo+JRt0YzfP44o=",
335 "path": "github.com/hashicorp/hil/scanner",
336 "revision": "fac2259da677551de1fb92b844c4d020a38d8468",
337 "revisionTime": "2017-05-12T21:33:05Z"
338 },
339 {
340 "checksumSHA1": "vt+P9D2yWDO3gdvdgCzwqunlhxU=",
341 "path": "github.com/hashicorp/logutils",
342 "revision": "0dc08b1671f34c4250ce212759ebd880f743d883",
343 "revisionTime": "2015-06-09T07:04:31Z"
344 },
345 {
346 "checksumSHA1": "s8P1exD/wtgbfvLKAb6LT68D+vs=",
347 "path": "github.com/hashicorp/terraform/config",
348 "revision": "v0.9.5",
349 "revisionTime": "2017-05-18T13:55:41Z",
350 "version": "v0.9.5"
351 },
352 {
353 "checksumSHA1": "YiREjXkb7CDMZuUmkPGK0yySe8A=",
354 "path": "github.com/hashicorp/terraform/config/module",
355 "revision": "v0.9.5",
356 "revisionTime": "2017-05-18T13:55:41Z",
357 "version": "v0.9.5"
358 },
359 {
360 "checksumSHA1": "Iz6xWERlntUkpdLo6z2OIguMwu0=",
361 "path": "github.com/hashicorp/terraform/dag",
362 "revision": "v0.9.5",
363 "revisionTime": "2017-05-18T13:55:41Z",
364 "version": "v0.9.5"
365 },
366 {
367 "checksumSHA1": "p4y7tbu9KD/3cKQKe92I3DyjgRc=",
368 "path": "github.com/hashicorp/terraform/flatmap",
369 "revision": "v0.9.5",
370 "revisionTime": "2017-05-18T13:55:41Z",
371 "version": "v0.9.5"
372 },
373 {
374 "checksumSHA1": "ZcY9YbDucbey7sESh+e5JgU+sI4=",
375 "path": "github.com/hashicorp/terraform/helper/acctest",
376 "revision": "v0.9.5",
377 "revisionTime": "2017-05-18T13:55:41Z",
378 "version": "v0.9.5"
379 },
380 {
381 "checksumSHA1": "uT6Q9RdSRAkDjyUgQlJ2XKJRab4=",
382 "path": "github.com/hashicorp/terraform/helper/config",
383 "revision": "v0.9.5",
384 "revisionTime": "2017-05-18T13:55:41Z",
385 "version": "v0.9.5"
386 },
387 {
388 "checksumSHA1": "Vbo55GDzPgG/L/+W2pcvDhxrPZc=",
389 "path": "github.com/hashicorp/terraform/helper/experiment",
390 "revision": "v0.9.5",
391 "revisionTime": "2017-05-18T13:55:41Z",
392 "version": "v0.9.5"
393 },
394 {
395 "checksumSHA1": "BmIPKTr0zDutSJdyq7pYXrK1I3E=",
396 "path": "github.com/hashicorp/terraform/helper/hashcode",
397 "revision": "v0.9.5",
398 "revisionTime": "2017-05-18T13:55:41Z",
399 "version": "v0.9.5"
400 },
401 {
402 "checksumSHA1": "B267stWNQd0/pBTXHfI/tJsxzfc=",
403 "path": "github.com/hashicorp/terraform/helper/hilmapstructure",
404 "revision": "v0.9.5",
405 "revisionTime": "2017-05-18T13:55:41Z",
406 "version": "v0.9.5"
407 },
408 {
409 "checksumSHA1": "2wJa9F3BGlbe2DNqH5lb5POayRI=",
410 "path": "github.com/hashicorp/terraform/helper/logging",
411 "revision": "v0.9.5",
412 "revisionTime": "2017-05-18T13:55:41Z",
413 "version": "v0.9.5"
414 },
415 {
416 "checksumSHA1": "lizCn3wKWKhtaFz/Vxq/icNUfZE=",
417 "path": "github.com/hashicorp/terraform/helper/resource",
418 "revision": "v0.9.5",
419 "revisionTime": "2017-05-18T13:55:41Z",
420 "version": "v0.9.5"
421 },
422 {
423 "checksumSHA1": "cOp4qFx3FPNQKymAzGnMyvyo+Vk=",
424 "path": "github.com/hashicorp/terraform/helper/schema",
425 "revision": "v0.9.5",
426 "revisionTime": "2017-05-18T13:55:41Z",
427 "version": "v0.9.5"
428 },
429 {
430 "checksumSHA1": "oLui7dYxhzfAczwwdNZDm4tzHtk=",
431 "path": "github.com/hashicorp/terraform/helper/shadow",
432 "revision": "v0.9.5",
433 "revisionTime": "2017-05-18T13:55:41Z",
434 "version": "v0.9.5"
435 },
436 {
437 "checksumSHA1": "Fzbv+N7hFXOtrR6E7ZcHT3jEE9s=",
438 "path": "github.com/hashicorp/terraform/helper/structure",
439 "revision": "v0.9.5",
440 "revisionTime": "2017-05-18T13:55:41Z",
441 "version": "v0.9.5"
442 },
443 {
444 "checksumSHA1": "Q3gCaw8WD99fSYwYZlXJ2+MVQh4=",
445 "path": "github.com/hashicorp/terraform/helper/validation",
446 "revision": "v0.9.5",
447 "revisionTime": "2017-05-18T13:55:41Z",
448 "version": "v0.9.5"
449 },
450 {
451 "checksumSHA1": "6AA7ZAzswfl7SOzleP6e6he0lq4=",
452 "path": "github.com/hashicorp/terraform/plugin",
453 "revision": "v0.9.5",
454 "revisionTime": "2017-05-18T13:55:41Z",
455 "version": "v0.9.5"
456 },
457 {
458 "checksumSHA1": "aJCSHitk1DIPdKNixjDdfGig/cQ=",
459 "path": "github.com/hashicorp/terraform/terraform",
460 "revision": "v0.9.5",
461 "revisionTime": "2017-05-18T13:55:41Z",
462 "version": "v0.9.5"
463 },
464 {
465 "checksumSHA1": "ZhK6IO2XN81Y+3RAjTcVm1Ic7oU=",
466 "path": "github.com/hashicorp/yamux",
467 "revision": "d1caa6c97c9fc1cc9e83bbe34d0603f9ff0ce8bd",
468 "revisionTime": "2016-07-20T23:31:40Z"
469 },
470 {
471 "checksumSHA1": "0ZrwvB6KoGPj2PoDNSEJwxQ6Mog=",
472 "comment": "0.2.2-2-gc01cf91",
473 "path": "github.com/jmespath/go-jmespath",
474 "revision": "bd40a432e4c76585ef6b72d3fd96fb9b6dc7b68d",
475 "revisionTime": "2016-08-03T19:07:31Z"
476 },
477 {
478 "checksumSHA1": "guxbLo8KHHBeM0rzou4OTzzpDNs=",
479 "path": "github.com/mitchellh/copystructure",
480 "revision": "5af94aef99f597e6a9e1f6ac6be6ce0f3c96b49d",
481 "revisionTime": "2016-10-13T19:53:42Z"
482 },
483 {
484 "checksumSHA1": "V/quM7+em2ByJbWBLOsEwnY3j/Q=",
485 "path": "github.com/mitchellh/go-homedir",
486 "revision": "b8bc1bf767474819792c23f32d8286a45736f1c6",
487 "revisionTime": "2016-12-03T19:45:07Z"
488 },
489 {
490 "checksumSHA1": "xyoJKalfQwTUN1qzZGQKWYAwl0A=",
491 "path": "github.com/mitchellh/hashstructure",
492 "revision": "6b17d669fac5e2f71c16658d781ec3fdd3802b69"
493 },
494 {
495 "checksumSHA1": "MlX15lJuV8DYARX5RJY8rqrSEWQ=",
496 "path": "github.com/mitchellh/mapstructure",
497 "revision": "53818660ed4955e899c0bcafa97299a388bd7c8e",
498 "revisionTime": "2017-03-07T20:11:23Z"
499 },
500 {
501 "checksumSHA1": "vBpuqNfSTZcAR/0tP8tNYacySGs=",
502 "path": "github.com/mitchellh/reflectwalk",
503 "revision": "92573fe8d000a145bfebc03a16bc22b34945867f",
504 "revisionTime": "2016-10-03T17:45:16Z"
505 },
506 {
507 "checksumSHA1": "zmC8/3V4ls53DJlNTKDZwPSC/dA=",
508 "path": "github.com/satori/go.uuid",
509 "revision": "b061729afc07e77a8aa4fad0a2fd840958f1942a",
510 "revisionTime": "2016-09-27T10:08:44Z"
511 },
512 {
513 "checksumSHA1": "C1KKOxFoW7/W/NFNpiXK+boguNo=",
514 "path": "golang.org/x/crypto/curve25519",
515 "revision": "453249f01cfeb54c3d549ddb75ff152ca243f9d8",
516 "revisionTime": "2017-02-08T20:51:15Z"
517 },
518 {
519 "checksumSHA1": "wGb//LjBPNxYHqk+dcLo7BjPXK8=",
520 "path": "golang.org/x/crypto/ed25519",
521 "revision": "b8a2a83acfe6e6770b75de42d5ff4c67596675c0",
522 "revisionTime": "2017-01-13T19:21:00Z"
523 },
524 {
525 "checksumSHA1": "LXFcVx8I587SnWmKycSDEq9yvK8=",
526 "path": "golang.org/x/crypto/ed25519/internal/edwards25519",
527 "revision": "b8a2a83acfe6e6770b75de42d5ff4c67596675c0",
528 "revisionTime": "2017-01-13T19:21:00Z"
529 },
530 {
531 "checksumSHA1": "fsrFs762jlaILyqqQImS1GfvIvw=",
532 "path": "golang.org/x/crypto/ssh",
533 "revision": "453249f01cfeb54c3d549ddb75ff152ca243f9d8",
534 "revisionTime": "2017-02-08T20:51:15Z"
535 }
536 ],
537 "rootPath": "github.com/terraform-providers/terraform-provider-statuscake"
538}